diff --git a/.chainsaw.yaml b/.chainsaw.yaml new file mode 100644 index 00000000..e3644593 --- /dev/null +++ b/.chainsaw.yaml @@ -0,0 +1,12 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/configuration-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Configuration +metadata: + name: configuration +spec: + timeouts: + assert: 5m0s + cleanup: 5m0s + delete: 5m0s + error: 5m0s + exec: 5m0s diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index 75ee545b..13a46e0e 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -1,3 +1,16 @@ + + ### General: * [ ] Have you removed all sensitive information, including but not limited to access keys and passwords? diff --git a/.github/dependabot.yml b/.github/dependabot.yml new file mode 100644 index 00000000..70f9fe7f --- /dev/null +++ b/.github/dependabot.yml @@ -0,0 +1,38 @@ +version: 2 +updates: + +# Go - root directory + - package-ecosystem: "gomod" + directory: "/" + schedule: + interval: "weekly" + ## group all dependencies with a k8s.io prefix into a single PR. + groups: + kubernetes: + patterns: [ "k8s.io/*", "sigs.k8s.io/*" ] + otel: + patterns: ["go.opentelemetry.io/*"] + commit-message: + prefix: ":seedling:" + labels: + - "dependencies" + +# Docker + - package-ecosystem: "docker" + directory: "/" + schedule: + interval: "weekly" + commit-message: + prefix: ":seedling:" + labels: + - "dependencies" + +# github-actions + - package-ecosystem: "github-actions" + directory: "/" + schedule: + interval: "weekly" + commit-message: + prefix: ":seedling:" + labels: + - "dependencies" diff --git a/.github/filters.yml b/.github/filters.yml new file mode 100644 index 00000000..a790d19d --- /dev/null +++ b/.github/filters.yml @@ -0,0 +1,3 @@ +# Any file that is not a doc *.md file +src: + - "!**/*.md" diff --git a/.github/labels.yml b/.github/labels.yml index f9b89f11..7b91ed41 100644 --- a/.github/labels.yml +++ b/.github/labels.yml @@ -1,24 +1,35 @@ -- name: added-feature +# PR Labels +- name: new-feature description: for new features in the changelog. - color: a2eeef -- name: changed - description: for changes in existing functionality in the changelog. - color: a2eeef -- name: deprecated - description: for soon-to-be removed features in the changelog. - color: e4e669 -- name: removed - description: for now removed features in the changelog. - color: e4e669 + color: 225fee +- name: improvement + description: for improvements in existing functionality in the changelog. + color: 22ee47 +- name: repo-ci-improvement + description: for improvements in the repository or CI workflow in the changelog. + color: c922ee - name: bugfix description: for any bug fixes in the changelog. - color: d73a4a -- name: security - description: for vulnerabilities in the changelog. - color: dd4739 -- name: bug - description: Something isn't working in this issue. - color: d73a4a + color: ed8e21 +- name: documentation + description: for updates to the documentation in the changelog. + color: d3e1e6 +- name: dependencies + description: dependency updates including security fixes + color: 5c9dff +- name: testing + description: for updates to the testing suite in the changelog. + color: 933ac9 +- name: breaking-change + description: for breaking changes in the changelog. + color: ff0000 +- name: ignore-for-release + description: PRs you do not want to render in the changelog. 
+    color: 7b8eac +# Issue Labels - name: enhancement -  description: New feature request in this issue. -  color: a2eeef +  description: issues that request an enhancement. +  color: 22ee47 +- name: bug +  description: issues that report a bug. +  color: ed8e21 diff --git a/.github/release-drafter.yml b/.github/release-drafter.yml new file mode 100644 index 00000000..d880f84a --- /dev/null +++ b/.github/release-drafter.yml @@ -0,0 +1,67 @@ +name-template: 'v$NEXT_PATCH_VERSION' +tag-template: 'v$NEXT_PATCH_VERSION' +exclude-labels: +  - ignore-for-release +categories: +  - title: ⚠️ Breaking Change +    labels: +      - breaking-change +  - title: 🐛 Bug Fixes +    labels: +      - bugfix +  - title: 🚀 New Features +    labels: +      - new-feature +  - title: 💡 Improvements +    labels: +      - improvement +  - title: 🧪 Testing Improvements +    labels: +      - testing +  - title: ⚙️ Repo/CI Improvements +    labels: +      - repo-ci-improvement +  - title: 📖 Documentation +    labels: +      - documentation +  - title: 📦 Dependency Updates +    labels: +      - dependencies +  - title: Other Changes +    labels: +      - "*" +autolabeler: +  - label: 'breaking-change' +    title: +      - '/.*\[breaking\].+/' +  - label: 'deprecation' +    title: +      - '/.*\[deprecation\].+/' +  - label: 'bugfix' +    title: +      - '/.*\[fix\].+/' +  - label: 'new-feature' +    title: +      - '/.*\[feat\].+/' +  - label: 'improvement' +    title: +      - '/.*\[improvement\].+/' +  - label: 'testing' +    title: +      - '/.*\[test\].+/' +  - label: 'repo-ci-improvement' +    title: +      - '/.*\[CI\].+/' +      - '/.*\[ci\].+/' +  - label: 'documentation' +    title: +      - '/.*\[docs\].+/' +  - label: 'dependencies' +    title: +      - '/.*\[deps\].+/' + +change-template: '- $TITLE by @$AUTHOR in #$NUMBER' +no-changes-template: "- No changes" +template: | +  ## What's Changed +  $CHANGES diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 45cf6169..c034a43a 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -4,35 +4,103 @@ on: push: branches: - main -  pull_request: null +  pull_request_target: null + +permissions: +  contents: read +  pull-requests: read +  actions: read + +concurrency: +  group: ci-${{ github.ref }} +  cancel-in-progress: true jobs: -  ci: +  changes: runs-on: ubuntu-latest -    strategy: -      matrix: -        go-version: [ 'stable', 'oldstable', '1.20' ] +    outputs: +      paths: ${{ steps.filter.outputs.changes }} steps: -      - uses: actions/checkout@v4 +      - uses: actions/checkout@v4.2.2 with: -          fetch-depth: 0 -      - uses: actions/setup-go@v4 +          ref: ${{ github.event.pull_request.head.sha }} +      - name: Harden Runner +        uses: step-security/harden-runner@v2 with: -          go-version: ${{ matrix.go-version }} +          disable-sudo: true +          egress-policy: block +          allowed-endpoints: > +            api.github.com:443 +            github.com:443 +      - uses: dorny/paths-filter@v3 +        id: filter +        with: +          base: ${{ github.ref }} +          filters: .github/filters.yml + +  build-test: +    runs-on: ubuntu-latest +    environment: ${{ github.event.pull_request.head.repo.fork == true && 'prod-external' || 'prod' }} +    needs: changes +    if: ${{ contains(fromJSON(needs.changes.outputs.paths), 'src') }} +    steps: +      - name: Harden Runner +        uses: step-security/harden-runner@v2 +        with: +          disable-sudo: true +          egress-policy: block +          allowed-endpoints: > +            api.github.com:443 +            github.com:443 +            golang.org:443 +            proxy.golang.org:443 +            sum.golang.org:443 +            objects.githubusercontent.com:443 +            storage.googleapis.com:443 +            cli.codecov.io:443 +            api.codecov.io:443 +            ingest.codecov.io:443 +            raw.githubusercontent.com:443 +            get.helm.sh:443 + +      - uses: actions/checkout@v4.2.2 +        with: +          ref: ${{ 
github.event.pull_request.head.sha }} + - uses: actions/setup-go@v5 + with: + go-version-file: go.mod + check-latest: true + - name: Vet run: make vet - - name: Lint - run: make lint + + - name: lint + uses: golangci/golangci-lint-action@v6 + with: + version: v1.62.2 + - name: Helm Lint run: make helm-lint + - name: Test run: make test + + - name: Upload coverage reports to Codecov + uses: codecov/codecov-action@v5 + with: + files: ./coverage.out + verbose: true + token: ${{ secrets.CODECOV_TOKEN }} + slug: linode/linode-cloud-controller-manager + - name: Build run: make build + docker-build: runs-on: ubuntu-latest + environment: ${{ github.event.pull_request.head.repo.fork == true && 'prod-external' || 'prod' }} steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@v4.2.2 with: fetch-depth: 0 - name: Docker Meta @@ -45,7 +113,7 @@ jobs: type=raw,value=latest,enable=${{ github.ref == format('refs/heads/{0}', 'main') }} type=semver,pattern={{raw}},value=${{ github.ref_name }} - name: Build Dockerfile - uses: docker/build-push-action@v5 + uses: docker/build-push-action@v6 with: context: . push: false @@ -53,3 +121,49 @@ jobs: labels: ${{ steps.meta.outputs.labels }} build-args: | REV=${{ github.ref_name }} + + e2e-tests: + runs-on: ubuntu-latest + environment: ${{ github.event.pull_request.head.repo.fork == true && 'prod-external' || 'prod' }} + needs: changes + if: ${{ contains(fromJSON(needs.changes.outputs.paths), 'src') }} + env: + GITHUB_TOKEN: ${{ secrets.github_token }} + LINODE_TOKEN: ${{ secrets.LINODE_TOKEN }} + IMG: linode/linode-cloud-controller-manager:${{ github.ref == 'refs/heads/main' && 'latest' || format('pr-{0}', github.event.number) || github.ref_name }} + LINODE_REGION: us-lax + LINODE_CONTROL_PLANE_MACHINE_TYPE: g6-standard-2 + LINODE_MACHINE_TYPE: g6-standard-2 + WORKER_NODES: '2' + steps: + - uses: actions/checkout@v4.2.2 + with: + ref: ${{ github.event.pull_request.head.sha }} + + - name: Set up Go + uses: actions/setup-go@v5 + with: + go-version-file: 'go.mod' + check-latest: true + + - name: Login to Docker Hub + uses: docker/login-action@v3 + with: + username: ${{ secrets.DOCKER_USERNAME }} + password: ${{ secrets.DOCKER_PASSWORD }} + + - name: Install devbox + uses: jetify-com/devbox-install-action@v0.12.0 + + - name: Setup CAPL Management Kind Cluster and CAPL Child Cluster For Testing + run: devbox run mgmt-and-capl-cluster + + - name: Run E2E Tests + run: devbox run e2e-test + + - name: Run Cilium BGP e2e test + run: devbox run e2e-test-bgp + + - name: Cleanup Resources + if: always() + run: devbox run cleanup-cluster diff --git a/.github/workflows/helm.yml b/.github/workflows/helm.yml index 17df884e..85322888 100644 --- a/.github/workflows/helm.yml +++ b/.github/workflows/helm.yml @@ -15,7 +15,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout - uses: actions/checkout@v4 + uses: actions/checkout@v4.2.2 with: fetch-depth: 0 @@ -26,15 +26,15 @@ jobs: sed -ie "s/version: 0.0.0/version: ${TAG#helm-}/g" deploy/chart/Chart.yaml - name: Set up Helm - uses: azure/setup-helm@v3 + uses: azure/setup-helm@v4 - - uses: actions/setup-python@v4 + - uses: actions/setup-python@v5 with: python-version: '3.10' check-latest: true - name: Set up chart-testing - uses: helm/chart-testing-action@v2.6.0 + uses: helm/chart-testing-action@v2.7.0 - name: Run chart-testing (lint) run: ct lint --check-version-increment=false --chart-dirs deploy --target-branch ${{ github.event.repository.default_branch }} @@ -47,14 +47,14 @@ jobs: # run: ct install --chart-dirs helm-chart 
--namespace kube-system --helm-extra-set-args "--set=apiToken=test --set=region=us-east" --target-branch ${{ github.event.repository.default_branch }} helm-release: - if: github.ref == 'refs/heads/main' + if: ${{ startsWith(github.ref, 'refs/tags/') }} needs: helm-test permissions: contents: write # for helm/chart-releaser-action to push chart release and create a release runs-on: ubuntu-latest steps: - name: Checkout - uses: actions/checkout@v4 + uses: actions/checkout@v4.2.2 with: fetch-depth: 0 @@ -70,10 +70,10 @@ jobs: git config user.email "$GITHUB_ACTOR@users.noreply.github.com" - name: Set up Helm - uses: azure/setup-helm@v3 + uses: azure/setup-helm@v4 - name: Run chart-releaser - uses: helm/chart-releaser-action@v1.6.0 + uses: helm/chart-releaser-action@v1.7.0 env: CR_TOKEN: "${{ secrets.GITHUB_TOKEN }}" CR_RELEASE_NAME_TEMPLATE: "helm-{{ .Version }}" diff --git a/.github/workflows/label-sync.yml b/.github/workflows/label-sync.yml index 9b097ecf..f502b3e6 100644 --- a/.github/workflows/label-sync.yml +++ b/.github/workflows/label-sync.yml @@ -9,7 +9,7 @@ jobs: build: runs-on: ubuntu-latest steps: - - uses: actions/checkout@5a4ac9002d0be2fb38bd78e4b4dbde5606d7042f # pin@v2 + - uses: actions/checkout@cbb722410c2e876e24abbe8de2cc27693e501dcb # pin@v2 - uses: micnncim/action-label-syncer@3abd5ab72fda571e69fffd97bd4e0033dd5f495c # pin@v1 env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/pr-labeler.yml b/.github/workflows/pr-labeler.yml new file mode 100644 index 00000000..89a24f7c --- /dev/null +++ b/.github/workflows/pr-labeler.yml @@ -0,0 +1,25 @@ +name: PR labeler + +on: + workflow_dispatch: + pull_request_target: + types: [opened, reopened, synchronize] + +jobs: + label-pr: + name: Update PR labels + permissions: + contents: write + pull-requests: write + runs-on: ubuntu-latest + steps: + - name: Checkout repository + uses: actions/checkout@v4.2.2 + with: + fetch-depth: 0 + - name: Label PR + uses: release-drafter/release-drafter@v6 + with: + disable-releaser: github.ref != 'refs/heads/main' + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/release-drafter.yml b/.github/workflows/release-drafter.yml new file mode 100644 index 00000000..cf85d7e3 --- /dev/null +++ b/.github/workflows/release-drafter.yml @@ -0,0 +1,21 @@ +name: Release Drafter + +on: + workflow_dispatch: + push: + branches: + - main + +permissions: + contents: read + +jobs: + update_release_draft: + permissions: + contents: write + pull-requests: write + runs-on: ubuntu-latest + steps: + - uses: release-drafter/release-drafter@v6 + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index bb27f50d..f3bf0096 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -8,7 +8,7 @@ jobs: release: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@v4.2.2 with: fetch-depth: 0 - name: Create Release Artifacts @@ -16,7 +16,7 @@ jobs: env: IMAGE_VERSION: ${{ github.ref_name }} - name: Upload Release Artifacts - uses: softprops/action-gh-release@v1 + uses: softprops/action-gh-release@v2 with: files: | ./release/helm-chart-${{ github.ref_name }}.tgz @@ -35,7 +35,7 @@ jobs: username: ${{ secrets.DOCKER_USERNAME }} password: ${{ secrets.DOCKER_PASSWORD }} - name: Build and Push to Docker Hub - uses: docker/build-push-action@v5 + uses: docker/build-push-action@v6 with: context: . 
push: true diff --git a/.gitignore b/.gitignore index 87eb4e71..d23661ad 100644 --- a/.gitignore +++ b/.gitignore @@ -9,6 +9,7 @@ linode-cloud-controller-manager # Folders _obj _test +bin # Architecture specific extensions/prefixes *.[568vq] diff --git a/.golangci.yml b/.golangci.yml index 19df20d2..fcb5072d 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -15,7 +15,7 @@ linters: # cherry picked from https://golangci-lint.run/usage/linters/ # - ginkgolinter # to be enabled once #158 is merged - bodyclose - - exportloopref + - copyloopvar - gocheckcompilerdirectives - gofmt - goimports diff --git a/Dockerfile b/Dockerfile index 20942766..24ed3827 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.21-alpine as builder +FROM golang:1.23-alpine AS builder RUN mkdir -p /linode WORKDIR /linode @@ -11,7 +11,7 @@ COPY sentry ./sentry RUN go mod download RUN go build -a -ldflags '-extldflags "-static"' -o /bin/linode-cloud-controller-manager-linux /linode -FROM alpine:3.18.4 +FROM alpine:3.21.2 RUN apk add --update --no-cache ca-certificates LABEL maintainers="Linode" LABEL description="Linode Cloud Controller Manager" diff --git a/Makefile b/Makefile index 2bc6d3c5..42a96e2a 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,40 @@ -IMG ?= linode/linode-cloud-controller-manager:canary -RELEASE_DIR ?= release -GOLANGCI_LINT_IMG := golangci/golangci-lint:v1.55-alpine -PLATFORM ?= linux/amd64 +IMG ?= linode/linode-cloud-controller-manager:canary +RELEASE_DIR ?= release +PLATFORM ?= linux/amd64 + +# Use CACHE_BIN for tools that cannot use devbox and LOCALBIN for tools that can use either method +CACHE_BIN ?= $(CURDIR)/bin +LOCALBIN ?= $(CACHE_BIN) + +DEVBOX_BIN ?= $(DEVBOX_PACKAGES_DIR)/bin +HELM ?= $(LOCALBIN)/helm +HELM_VERSION ?= v3.16.3 + +##################################################################### +# Dev Setup +##################################################################### +CLUSTER_NAME ?= ccm-$(shell git rev-parse --short HEAD) +K8S_VERSION ?= "v1.31.2" +CAPI_VERSION ?= "v1.8.5" +CAAPH_VERSION ?= "v0.2.1" +CAPL_VERSION ?= "v0.7.1" +CONTROLPLANE_NODES ?= 1 +WORKER_NODES ?= 1 +LINODE_FIREWALL_ENABLED ?= true +LINODE_REGION ?= us-lax +LINODE_OS ?= linode/ubuntu22.04 +KUBECONFIG_PATH ?= $(CURDIR)/test-cluster-kubeconfig.yaml +MGMT_KUBECONFIG_PATH ?= $(CURDIR)/mgmt-cluster-kubeconfig.yaml + +# if the $DEVBOX_PACKAGES_DIR env variable exists that means we are within a devbox shell and can safely +# use devbox's bin for our tools +ifdef DEVBOX_PACKAGES_DIR + LOCALBIN = $(DEVBOX_BIN) +endif + +export PATH := $(CACHE_BIN):$(PATH) +$(LOCALBIN): + mkdir -p $(LOCALBIN) export GO111MODULE=on @@ -14,6 +47,7 @@ clean: @rm -rf ./.tmp @rm -rf dist/* @rm -rf $(RELEASE_DIR) + @rm -rf $(LOCALBIN) .PHONY: codegen codegen: @@ -25,10 +59,13 @@ vet: fmt .PHONY: lint lint: - docker run --rm -v "$(shell pwd):/var/work:ro" -w /var/work \ - golangci/golangci-lint:v1.55.2 golangci-lint run -v --timeout=5m - docker run --rm -v "$(shell pwd):/var/work:ro" -w /var/work/e2e \ - golangci/golangci-lint:v1.55.2 golangci-lint run -v --timeout=5m + docker run --rm -v "$(PWD):/var/work:ro" -w /var/work \ + golangci/golangci-lint:latest golangci-lint run -c .golangci.yml + +.PHONY: gosec +gosec: ## Run gosec against code. + docker run --rm -v "$(PWD):/var/work:ro" -w /var/work securego/gosec:2.19.0 \ + -exclude-dir=bin -exclude-generated ./... .PHONY: fmt fmt: @@ -37,7 +74,7 @@ fmt: .PHONY: test # we say code is not worth testing unless it's formatted test: fmt codegen - go test -v -cover ./cloud/... 
$(TEST_ARGS) + go test -v -cover -coverprofile ./coverage.out ./cloud/... ./sentry/... $(TEST_ARGS) .PHONY: build-linux build-linux: codegen @@ -72,9 +109,11 @@ docker-build: build-linux .PHONY: docker-push # must run the docker build before pushing the image docker-push: - echo "[reminder] Did you run `make docker-build`?" docker push ${IMG} +.PHONY: docker-setup +docker-setup: docker-build docker-push + .PHONY: run # run the ccm locally, really only makes sense on linux anyway run: build @@ -91,33 +130,114 @@ run-debug: build --stderrthreshold=INFO \ --kubeconfig=${KUBECONFIG} \ --linodego-debug + +##################################################################### +# E2E Test Setup +##################################################################### + +.PHONY: mgmt-and-capl-cluster +mgmt-and-capl-cluster: docker-setup mgmt-cluster capl-cluster + +.PHONY: capl-cluster +capl-cluster: generate-capl-cluster-manifests create-capl-cluster patch-linode-ccm + +.PHONY: generate-capl-cluster-manifests +generate-capl-cluster-manifests: + # Create the CAPL cluster manifests without any CSI driver stuff + LINODE_FIREWALL_ENABLED=$(LINODE_FIREWALL_ENABLED) LINODE_OS=$(LINODE_OS) clusterctl generate cluster $(CLUSTER_NAME) \ + --kubernetes-version $(K8S_VERSION) --infrastructure linode-linode:$(CAPL_VERSION) \ + --control-plane-machine-count $(CONTROLPLANE_NODES) --worker-machine-count $(WORKER_NODES) > capl-cluster-manifests.yaml + +.PHONY: create-capl-cluster +create-capl-cluster: + # Create a CAPL cluster with updated CCM and wait for it to be ready + kubectl apply -f capl-cluster-manifests.yaml + kubectl wait --for=condition=ControlPlaneReady cluster/$(CLUSTER_NAME) --timeout=600s || (kubectl get cluster -o yaml; kubectl get linodecluster -o yaml; kubectl get linodemachines -o yaml) + kubectl wait --for=condition=NodeHealthy=true machines -l cluster.x-k8s.io/cluster-name=$(CLUSTER_NAME) --timeout=900s + clusterctl get kubeconfig $(CLUSTER_NAME) > $(KUBECONFIG_PATH) + KUBECONFIG=$(KUBECONFIG_PATH) kubectl wait --for=condition=Ready nodes --all --timeout=600s + # Remove all taints from control plane node so that pods scheduled on it by tests can run (without this, some tests fail) + KUBECONFIG=$(KUBECONFIG_PATH) kubectl taint nodes -l node-role.kubernetes.io/control-plane node-role.kubernetes.io/control-plane- + +.PHONY: patch-linode-ccm +patch-linode-ccm: + KUBECONFIG=$(KUBECONFIG_PATH) kubectl patch -n kube-system daemonset ccm-linode --type='json' -p="[{'op': 'replace', 'path': '/spec/template/spec/containers/0/image', 'value': '${IMG}'}]" + KUBECONFIG=$(KUBECONFIG_PATH) kubectl rollout status -n kube-system daemonset/ccm-linode --timeout=600s + KUBECONFIG=$(KUBECONFIG_PATH) kubectl -n kube-system get daemonset/ccm-linode -o yaml + +.PHONY: mgmt-cluster +mgmt-cluster: + # Create a mgmt cluster + ctlptl apply -f e2e/setup/ctlptl-config.yaml + clusterctl init \ + --wait-providers \ + --wait-provider-timeout 600 \ + --core cluster-api:$(CAPI_VERSION) \ + --bootstrap kubeadm:$(CAPI_VERSION) \ + --control-plane kubeadm:$(CAPI_VERSION) \ + --addon helm:$(CAAPH_VERSION) \ + --infrastructure linode-linode:$(CAPL_VERSION) + kind get kubeconfig --name=caplccm > $(MGMT_KUBECONFIG_PATH) + +.PHONY: cleanup-cluster +cleanup-cluster: + kubectl delete cluster -A --all --timeout=180s + kubectl delete linodefirewalls -A --all --timeout=60s + kubectl delete lvpc -A --all --timeout=60s + kind delete cluster -n caplccm + +.PHONY: e2e-test +e2e-test: + CLUSTER_NAME=$(CLUSTER_NAME) \ + 
MGMT_KUBECONFIG=$(MGMT_KUBECONFIG_PATH) \ + KUBECONFIG=$(KUBECONFIG_PATH) \ + REGION=$(LINODE_REGION) \ + LINODE_TOKEN=$(LINODE_TOKEN) \ + chainsaw test e2e/test --parallel 2 + +.PHONY: e2e-test-bgp +e2e-test-bgp: + KUBECONFIG=$(KUBECONFIG_PATH) CLUSTER_SUFFIX=$(CLUSTER_NAME) ./e2e/setup/cilium-setup.sh + KUBECONFIG=$(KUBECONFIG_PATH) kubectl -n kube-system rollout status daemonset/ccm-linode --timeout=300s + CLUSTER_NAME=$(CLUSTER_NAME) \ + MGMT_KUBECONFIG=$(MGMT_KUBECONFIG_PATH) \ + KUBECONFIG=$(KUBECONFIG_PATH) \ + REGION=$(LINODE_REGION) \ + LINODE_TOKEN=$(LINODE_TOKEN) \ + chainsaw test e2e/bgp-test/lb-cilium-bgp + +##################################################################### +# OS / ARCH +##################################################################### + # Set the host's OS. Only linux and darwin supported for now HOSTOS := $(shell uname -s | tr '[:upper:]' '[:lower:]') ifeq ($(filter darwin linux,$(HOSTOS)),) $(error build only supported on linux and darwin host currently) endif +ARCH=$(shell uname -m) +ARCH_SHORT=$(ARCH) +ifeq ($(ARCH_SHORT),x86_64) +ARCH_SHORT := amd64 +else ifeq ($(ARCH_SHORT),aarch64) +ARCH_SHORT := arm64 +endif -HELM_VERSION ?= v3.9.1 -TOOLS_HOST_DIR ?= .tmp/tools -HELM := $(TOOLS_HOST_DIR)/helm-$(HELM_VERSION) - -.PHONY: $(HELM) -$(HELM): - @echo installing helm $(HELM_VERSION) - @mkdir -p $(TOOLS_HOST_DIR)/tmp-helm - @curl -fsSL https://get.helm.sh/helm-$(HELM_VERSION)-$(HOSTOS)-amd64.tar.gz | tar -xz -C $(TOOLS_HOST_DIR)/tmp-helm - @mv $(TOOLS_HOST_DIR)/tmp-helm/$(HOSTOS)-amd64/helm $(HELM) - @rm -fr $(TOOLS_HOST_DIR)/tmp-helm - @echo installing helm $(HELM_VERSION) +.PHONY: helm +helm: $(HELM) ## Download helm locally if necessary +$(HELM): $(LOCALBIN) + @curl -fsSL https://get.helm.sh/helm-$(HELM_VERSION)-$(HOSTOS)-$(ARCH_SHORT).tar.gz | tar -xz + @mv $(HOSTOS)-$(ARCH_SHORT)/helm $(HELM) + @rm -rf helm.tgz $(HOSTOS)-$(ARCH_SHORT) .PHONY: helm-lint -helm-lint: $(HELM) +helm-lint: helm #Verify lint works when region and apiToken are passed, and when it is passed as reference. @$(HELM) lint deploy/chart --set apiToken="apiToken",region="us-east" @$(HELM) lint deploy/chart --set secretRef.apiTokenRef="apiToken",secretRef.name="api",secretRef.regionRef="us-east" .PHONY: helm-template -helm-template: $(HELM) +helm-template: helm #Verify template works when region and apiToken are passed, and when it is passed as reference. 
@$(HELM) template foo deploy/chart --set apiToken="apiToken",region="us-east" > /dev/null @$(HELM) template foo deploy/chart --set secretRef.apiTokenRef="apiToken",secretRef.name="api",secretRef.regionRef="us-east" > /dev/null diff --git a/README.md b/README.md index 2236b2bf..9d8a521a 100644 --- a/README.md +++ b/README.md @@ -1,384 +1,107 @@ # Kubernetes Cloud Controller Manager for Linode [![Go Report Card](https://goreportcard.com/badge/github.com/linode/linode-cloud-controller-manager)](https://goreportcard.com/report/github.com/linode/linode-cloud-controller-manager) -[![Test](https://github.com/linode/linode-cloud-controller-manager/actions/workflows/test.yml/badge.svg)](https://github.com/linode/linode-cloud-controller-manager/actions/workflows/test.yml) -[![Coverage Status](https://coveralls.io/repos/github/linode/linode-cloud-controller-manager/badge.svg?branch=master)](https://coveralls.io/github/linode/linode-cloud-controller-manager?branch=master) +[![Continuous Integration](https://github.com/linode/linode-cloud-controller-manager/actions/workflows/ci.yml/badge.svg)](https://github.com/linode/linode-cloud-controller-manager/actions/workflows/ci.yml) +[![codecov](https://codecov.io/gh/linode/linode-cloud-controller-manager/graph/badge.svg?token=GSRnqHUmCk)](https://codecov.io/gh/linode/linode-cloud-controller-manager) [![Docker Pulls](https://img.shields.io/docker/pulls/linode/linode-cloud-controller-manager.svg)](https://hub.docker.com/r/linode/linode-cloud-controller-manager/) - [![Twitter](https://img.shields.io/twitter/follow/linode.svg?style=social&logo=twitter&label=Follow)](https://twitter.com/intent/follow?screen_name=linode) -## The purpose of the CCM -The Linode Cloud Controller Manager (CCM) creates a fully supported -Kubernetes experience on Linode. - -* Load balancers, Linode NodeBalancers, are automatically deployed when a -[Kubernetes Service of type "LoadBalancer"](https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer) is deployed. This is the most -reliable way to allow services running in your cluster to be reachable from -the Internet. -* Linode hostnames and network addresses (private/public IPs) are automatically -associated with their corresponding Kubernetes resources, forming the basis for -a variety of Kubernetes features. -* Nodes resources are put into the correct state when Linodes are shut down, -allowing pods to be appropriately rescheduled. -* Nodes are annotated with the Linode region, which is the basis for scheduling based on -failure domains. - -## Kubernetes Supported Versions -Kubernetes 1.9+ - -## Usage - -### LoadBalancer Services -Kubernetes Services of type `LoadBalancer` will be served through a [Linode NodeBalancer](https://www.linode.com/nodebalancers) which the Cloud Controller Manager will provision on demand. For general feature and usage notes, refer to the [Getting Started with Linode NodeBalancers](https://www.linode.com/docs/platform/nodebalancer/getting-started-with-nodebalancers/) guide. - -#### Annotations -The Linode CCM accepts several annotations which affect the properties of the underlying NodeBalancer deployment. - -All of the Service annotation names listed below have been shortened for readability. The values, such as `http`, are case-sensitive. - -Each *Service* annotation **MUST** be prefixed with:
-**`service.beta.kubernetes.io/linode-loadbalancer-`** - -Annotation (Suffix) | Values | Default | Description ----|---|---|--- -`throttle` | `0`-`20` (`0` to disable) | `20` | Client Connection Throttle, which limits the number of subsequent new connections per second from the same client IP -`default-protocol` | `tcp`, `http`, `https` | `tcp` | This annotation is used to specify the default protocol for Linode NodeBalancer. -`default-proxy-protocol` | `none`, `v1`, `v2` | `none` | Specifies whether to use a version of Proxy Protocol on the underlying NodeBalancer. -`port-*` | json (e.g. `{ "tls-secret-name": "prod-app-tls", "protocol": "https", "proxy-protocol": "v2"}`) | | Specifies port specific NodeBalancer configuration. See [Port Specific Configuration](#port-specific-configuration). `*` is the port being configured, e.g. `linode-loadbalancer-port-443` -`check-type` | `none`, `connection`, `http`, `http_body` | | The type of health check to perform against back-ends to ensure they are serving requests -`check-path` | string | | The URL path to check on each back-end during health checks -`check-body` | string | | Text which must be present in the response body to pass the NodeBalancer health check -`check-interval` | int | | Duration, in seconds, to wait between health checks -`check-timeout` | int (1-30) | | Duration, in seconds, to wait for a health check to succeed before considering it a failure -`check-attempts` | int (1-30) | | Number of health check failures necessary to remove a back-end from the service -`check-passive` | [bool](#annotation-bool-values) | `false` | When `true`, `5xx` status codes will cause the health check to fail -`preserve` | [bool](#annotation-bool-values) | `false` | When `true`, deleting a `LoadBalancer` service does not delete the underlying NodeBalancer. This will also prevent deletion of the former LoadBalancer when another one is specified with the `nodebalancer-id` annotation. -`nodebalancer-id` | string | | The ID of the NodeBalancer to front the service. When not specified, a new NodeBalancer will be created. This can be configured on service creation or patching -`hostname-only-ingress` | [bool](#annotation-bool-values) | `false` | When `true`, the LoadBalancerStatus for the service will only contain the Hostname. This is useful for bypassing kube-proxy's rerouting of in-cluster requests originally intended for the external LoadBalancer to the service's constituent pod IPs. -`tags` | string | | A comma seperated list of tags to be applied to the createad NodeBalancer instance -`firewall-id` | string | | An existing Cloud Firewall ID to be attached to the NodeBalancer instance. See [Firewalls](#firewalls). -`firewall-acl` | string | | The Firewall rules to be applied to the NodeBalancer. Adding this annotation creates a new CCM managed Linode CloudFirewall instance. See [Firewalls](#firewalls). - -#### Deprecated Annotations -These annotations are deprecated, and will be removed in a future release. - -Annotation (Suffix) | Values | Default | Description | Scheduled Removal ----|---|---|---|--- -`proxy-protcol` | `none`, `v1`, `v2` | `none` | Specifies whether to use a version of Proxy Protocol on the underlying NodeBalancer | Q4 2021 - -#### Annotation bool values -For annotations with bool value types, `"1"`, `"t"`, `"T"`, `"True"`, `"true"` and `"True"` are valid string representations of `true`. Any other values will be interpreted as false. For more details, see [strconv.ParseBool](https://golang.org/pkg/strconv/#ParseBool). 
- -#### Port Specific Configuration -These configuration options can be specified via the `port-*` annotation, encoded in JSON. - -Key | Values | Default | Description ----|---|---|--- -`protocol` | `tcp`, `http`, `https` | `tcp` | Specifies protocol of the NodeBalancer port. Overwrites `default-protocol`. -`proxy-protocol` | `none`, `v1`, `v2` | `none` | Specifies whether to use a version of Proxy Protocol on the underlying NodeBalancer. Overwrites `default-proxy-protocol`. -`tls-secret-name` | string | | Specifies a secret to use for TLS. The secret type should be `kubernetes.io/tls`. - -#### Firewalls -Firewall rules can be applied to the CCM Managed NodeBalancers in two distinct ways. - -##### CCM Managed Firewall -To use this feature, ensure that the linode api token used with the ccm has the `add_firewalls` grant. - -The CCM accepts firewall ACLs in json form. The ACL can either be an `allowList` or a `denyList`. Supplying both is not supported. Supplying neither is not supported. The `allowList` sets up a CloudFirewall that `ACCEPT`s traffic only from the specified IPs/CIDRs and `DROP`s everything else. The `denyList` sets up a CloudFirewall that `DROP`s traffic only from the specified IPs/CIDRs and `ACCEPT`s everything else. Ports are automatically inferred from the service configuration. - -See [Firewall rules](https://www.linode.com/docs/api/networking/#firewall-create__request-body-schema) for more details on how to specify the IPs/CIDRs - -Example usage of an ACL to allow traffic from a specific set of addresses - -```yaml -kind: Service -apiVersion: v1 -metadata: - name: https-lb - annotations: - service.beta.kubernetes.io/linode-loadbalancer-firewall-acl: | - { - "allowList": { - "ipv4": ["192.166.0.0/16", "172.23.41.0/24"], - "ipv6": ["2001:DB8::/128"] - }, - } -spec: - type: LoadBalancer - selector: - app: nginx-https-example - ports: - - name: http - protocol: TCP - port: 80 - targetPort: http - - name: https - protocol: TCP - port: 443 - targetPort: https -``` - - -##### User Managed Firewall -Users can create CloudFirewall instances, supply their own rules and attach them to the NodeBalancer. To do so, set the -`service.beta.kubernetes.io/linode-loadbalancer-firewall-id` annotation to the ID of the cloud firewall. The CCM does not manage the lifecycle of the CloudFirewall Instance in this case. Users are responsible for ensuring the policies are correct. - -**Note**
-If the user supplies a firewall-id, and later switches to using an ACL, the CCM will take over the CloudFirewall Instance. To avoid this, delete the service, and re-create it so the original CloudFirewall is left undisturbed. - - - -### Nodes -Kubernetes Nodes can be configured with the following annotations. - -Each *Node* annotation **MUST** be prefixed with:
-**`node.k8s.linode.com/`** - -Key | Values | Default | Description ----|---|---|--- -`private-ip` | `IPv4` | `none` | Specifies the Linode Private IP overriding default detection of the Node InternalIP.
When using a [VLAN] or [VPC], the Node InternalIP may not be a Linode Private IP as [required for NodeBalancers] and should be specified. - - -[required for NodeBalancers]: https://www.linode.com/docs/api/nodebalancers/#nodebalancer-create__request-body-schema -[VLAN]: https://www.linode.com/products/vlan/ -[VPC]: https://www.linode.com/blog/linode/new-betas-coming-to-green-light/ - -### Example usage -```yaml -kind: Service -apiVersion: v1 -metadata: - name: https-lb - annotations: - service.beta.kubernetes.io/linode-loadbalancer-throttle: "4" - service.beta.kubernetes.io/linode-loadbalancer-default-protocol: "http" - service.beta.kubernetes.io/linode-loadbalancer-port-443: | - { - "tls-secret-name": "example-secret", - "protocol": "https" - } -spec: - type: LoadBalancer - selector: - app: nginx-https-example - ports: - - name: http - protocol: TCP - port: 80 - targetPort: http - - name: https - protocol: TCP - port: 443 - targetPort: https - ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: nginx-https-deployment -spec: - replicas: 2 - selector: - matchLabels: - app: nginx-https-example - template: - metadata: - labels: - app: nginx-https-example - spec: - containers: - - name: nginx - image: nginx - ports: - - name: http - containerPort: 80 - protocol: TCP - - name: https - containerPort: 80 - protocol: TCP - -``` - -See more in the [examples directory](examples) - -## Why `stickiness` and `algorithm` annotations don't exist -As kube-proxy will simply double-hop the traffic to a random backend Pod anyway, it doesn't matter which backend Node traffic is forwarded-to for the sake of session stickiness. -These annotations are not necessary to implement session stickiness, as kube-proxy will simply double-hop the packets to a random backend Pod. It would not make a difference to set a backend Node that would receive the network traffic in an attempt to set session stickiness. - -## How to use sessionAffinity -In Kubernetes, sessionAffinity refers to a mechanism that allows a client always to be redirected to the same pod when the client hits a service. - -To enable sessionAffinity `service.spec.sessionAffinity` field must be set to `ClientIP` as the following service yaml: - -```yaml -apiVersion: v1 -kind: Service -metadata: - name: wordpress-lsmnl-wordpress - namespace: wordpress-lsmnl - labels: - app: wordpress-lsmnl-wordpress -spec: - type: LoadBalancer - selector: - app: wordpress-lsmnl-wordpress - sessionAffinity: ClientIP -``` - -The max session sticky time can be set by setting the field `service.spec.sessionAffinityConfig.clientIP.timeoutSeconds` as below: - -```yaml -sessionAffinityConfig: - clientIP: - timeoutSeconds: 100 -``` - -## Generating a Manifest for Deployment -Use the script located at `./deploy/generate-manifest.sh` to generate a self-contained deployment manifest for the Linode CCM. Two arguments are required. - -The first argument must be a Linode APIv4 Personal Access Token with all permissions. -(https://cloud.linode.com/profile/tokens) - -The second argument must be a Linode region. -(https://api.linode.com/v4/regions) - -Example: - -```sh -./deploy/generate-manifest.sh $LINODE_API_TOKEN us-east -``` - -This will create a file `ccm-linode.yaml` which you can use to deploy the CCM. - -`kubectl apply -f ccm-linode.yaml` - -Note: Your kubelets, controller-manager, and apiserver must be started with `--cloud-provider=external` as noted in the following documentation. 
- -## Deployment Through Helm Chart -LINODE_API_TOKEN must be a Linode APIv4 [Personal Access Token](https://cloud.linode.com/profile/tokens) with all permissions. - -REGION must be a Linode [region](https://api.linode.com/v4/regions). -### Install the ccm-linode repo -```shell -helm repo add ccm-linode https://linode.github.io/linode-cloud-controller-manager/ -helm repo update ccm-linode -``` - -### To deploy ccm-linode. Run the following command: - -```sh -export VERSION=v0.3.22 -export LINODE_API_TOKEN= -export REGION= -helm install ccm-linode --set apiToken=$LINODE_API_TOKEN,region=$REGION ccm-linode/ccm-linode -``` -_See [helm install](https://helm.sh/docs/helm/helm_install/) for command documentation._ - -### To uninstall ccm-linode from kubernetes cluster. Run the following command: -```sh -helm uninstall ccm-linode -``` -_See [helm uninstall](https://helm.sh/docs/helm/helm_uninstall/) for command documentation._ - -### To upgrade when new changes are made to the helm chart. Run the following command: -```sh -export VERSION=v0.3.22 -export LINODE_API_TOKEN= -export REGION= - -helm upgrade ccm-linode --install --set apiToken=$LINODE_API_TOKEN,region=$REGION ccm-linode/ccm-linode -``` -_See [helm upgrade](https://helm.sh/docs/helm/helm_upgrade/) for command documentation._ - -### Configurations -There are other variables that can be set to a different value. For list of all the modifiable variables/values, take a look at './deploy/chart/values.yaml'. - -Values can be set/overrided by using the '--set var=value,...' flag or by passing in a custom-values.yaml using '-f custom-values.yaml'. - -Recommendation: Use custom-values.yaml to override the variables to avoid any errors with template rendering - -### Upstream Documentation Including Deployment Instructions - -[Kubernetes Cloud Controller Manager](https://kubernetes.io/docs/tasks/administer-cluster/running-cloud-controller/). - -## Upstream Developer Documentation - -[Developing a Cloud Controller Manager](https://kubernetes.io/docs/tasks/administer-cluster/developing-cloud-controller-manager/). - -## Development Guide - -### Building the Linode Cloud Controller Manager - -Some of the Linode Cloud Controller Manager development helper scripts rely -on a fairly up-to-date GNU tools environment, so most recent Linux distros -should work just fine out-of-the-box. - -#### Setup Go - -The Linode Cloud Controller Manager is written in Google's Go programming -language. Currently, the Linode Cloud Controller Manager is developed and -tested on **Go 1.8.3**. If you haven't set up a Go development environment, -please follow [these instructions](https://golang.org/doc/install) to -install Go. - -On macOS, Homebrew has a nice package - -```bash -brew install golang -``` - -#### Download Source - -```bash -go get github.com/linode/linode-cloud-controller-manager -cd $(go env GOPATH)/src/github.com/linode/linode-cloud-controller-manager -``` - -#### Install Dev tools -To install various dev tools for Pharm Controller Manager, run the following command: - -```bash -./hack/builddeps.sh -``` - -#### Build Binary -Use the following Make targets to build and run a local binary - -```bash -$ make build -$ make run -# You can also run the binary directly to pass additional args -$ dist/linode-cloud-controller-manager -``` - -#### Dependency management -Linode Cloud Controller Manager uses [Go Modules](https://blog.golang.org/using-go-modules) to manage dependencies. 
-If you want to update/add dependencies, run: - -```bash -go mod tidy -``` - -#### Building Docker images -To build and push a Docker image, use the following make targets. - -```bash -# Set the repo/image:tag with the TAG environment variable -# Then run the docker-build make target -$ IMG=linode/linode-cloud-controller-manager:canary make docker-build - -# Push Image -$ IMG=linode/linode-cloud-controller-manager:canary make docker-push -``` - -Then, to run the image - -```bash -docker run -ti linode/linode-cloud-controller-manager:canary -``` - -## Contribution Guidelines -Want to improve the linode-cloud-controller-manager? Please start [here](.github/CONTRIBUTING.md). - -## Join the Kubernetes Community -For general help or discussion, join us in #linode on the [Kubernetes Slack](https://kubernetes.slack.com/messages/CD4B15LUR/details/). To sign up, use the [Kubernetes Slack inviter](http://slack.kubernetes.io/). +## Overview + +The Linode Cloud Controller Manager (CCM) is a crucial component that integrates Kubernetes with Linode's infrastructure services. It implements the cloud-controller-manager binary, running cloud-specific control loops that are essential for cluster operation. + +A Cloud Controller Manager (CCM) is a Kubernetes control plane component that embeds cloud-specific control logic. It lets you link your cluster to your cloud provider's API, separating out the components that interact with that cloud platform from components that only interact with your cluster. + +### Core Components + +#### Node Controller +- Initializes node configuration with Linode-specific information + - Sets node addresses (public/private IPs) + - Labels nodes with region/zone information + - Configures node hostnames +- Monitors node health and lifecycle + - Detects node termination + - Updates node status + - Manages node cleanup + +#### Service Controller +- Manages LoadBalancer service implementations using Linode NodeBalancers + - Creates and configures NodeBalancers + - Updates backend pools + - Manages SSL/TLS certificates +- Handles automatic provisioning and configuration + - Health checks + - Session affinity + - Protocol configuration +- Supports multiple load balancing approaches + - Traditional NodeBalancer deployment + - BGP-based IP sharing for cost optimization + - Custom firewall rules and security configurations + +#### Route Controller +- Manages VPC and private network integration + - Configures routes for pod CIDR ranges + - Handles cross-node pod communication +- Ensures proper network connectivity + - Sets up pod-to-pod networking + - Manages network policies + - Configures network routes for optimal communication + +## Requirements + +- Kubernetes 1.22+ +- Kubelets, controller-manager, and apiserver with `--cloud-provider=external` +- Linode APIv4 Token +- Supported Linode region + +## Documentation + +### Quick Start +- [Getting Started Guide](docs/getting-started/README.md) - Start here for installation and setup + - [Overview](docs/getting-started/overview.md) - Learn about CCM basics + - [Requirements](docs/getting-started/requirements.md) - Check prerequisites + - [Installation](docs/getting-started/installation.md) - Install the CCM + - [Helm Installation](docs/getting-started/helm-installation.md) - Install using Helm + - [Manual Installation](docs/getting-started/manual-installation.md) - Manual setup instructions + - [Verification](docs/getting-started/verification.md) - Verify your installation + - [Troubleshooting](docs/getting-started/troubleshooting.md) - 
Common issues and solutions + +### Configuration +- [Configuration Guide](docs/configuration/README.md) - Detailed configuration options + - [LoadBalancer Services](docs/configuration/loadbalancer.md) + - [Service Annotations](docs/configuration/annotations.md) + - [Node Configuration](docs/configuration/nodes.md) + - [Environment Variables](docs/configuration/environment.md) + - [Firewall Setup](docs/configuration/firewall.md) + - [Route Configuration](docs/configuration/routes.md) + - [Session Affinity](docs/configuration/session-affinity.md) + +### Examples and Development +- [Examples](docs/examples/README.md) - Real-world usage examples + - [Basic Services](docs/examples/basic.md) + - [Advanced Configuration](docs/examples/advanced.md) +- [Development Guide](docs/development/README.md) - Contributing to CCM + +## Getting Help + +### Community Support + +For general help or discussion, join us in #linode on the [Kubernetes Slack](https://kubernetes.slack.com/messages/CD4B15LUR/details/). + +To sign up for Kubernetes Slack, use the [Kubernetes Slack inviter](http://slack.kubernetes.io/). + +### Issue Tracking + +If you've found a bug or want to request a feature: +- Check the [GitHub Issues](https://github.com/linode/linode-cloud-controller-manager/issues) +- Submit a [Pull Request](https://github.com/linode/linode-cloud-controller-manager/pulls) + +### Additional Resources + +- [Official Linode Documentation](https://www.linode.com/docs/) +- [Kubernetes Cloud Controller Manager Documentation](https://kubernetes.io/docs/tasks/administer-cluster/running-cloud-controller/) +- [API Documentation](https://www.linode.com/docs/api) + +## Contributing + +Want to improve the Linode Cloud Controller Manager? Please see our [contributing guidelines](.github/CONTRIBUTING.md). diff --git a/_config.yaml b/_config.yaml new file mode 100644 index 00000000..88b63ad6 --- /dev/null +++ b/_config.yaml @@ -0,0 +1 @@ +markdown: GFM diff --git a/cloud/annotations/annotations.go b/cloud/annotations/annotations.go new file mode 100644 index 00000000..21736009 --- /dev/null +++ b/cloud/annotations/annotations.go @@ -0,0 +1,36 @@ +package annotations + +const ( + // AnnLinodeDefaultProtocol is the annotation used to specify the default protocol + // for Linode load balancers. Options are tcp, http and https. Defaults to tcp. + AnnLinodeDefaultProtocol = "service.beta.kubernetes.io/linode-loadbalancer-default-protocol" + AnnLinodePortConfigPrefix = "service.beta.kubernetes.io/linode-loadbalancer-port-" + AnnLinodeDefaultProxyProtocol = "service.beta.kubernetes.io/linode-loadbalancer-default-proxy-protocol" + + AnnLinodeCheckPath = "service.beta.kubernetes.io/linode-loadbalancer-check-path" + AnnLinodeCheckBody = "service.beta.kubernetes.io/linode-loadbalancer-check-body" + AnnLinodeHealthCheckType = "service.beta.kubernetes.io/linode-loadbalancer-check-type" + + AnnLinodeHealthCheckInterval = "service.beta.kubernetes.io/linode-loadbalancer-check-interval" + AnnLinodeHealthCheckTimeout = "service.beta.kubernetes.io/linode-loadbalancer-check-timeout" + AnnLinodeHealthCheckAttempts = "service.beta.kubernetes.io/linode-loadbalancer-check-attempts" + AnnLinodeHealthCheckPassive = "service.beta.kubernetes.io/linode-loadbalancer-check-passive" + + // AnnLinodeThrottle is the annotation specifying the value of the Client Connection + // Throttle, which limits the number of subsequent new connections per second from the + // same client IP. Options are a number between 1-20, or 0 to disable. Defaults to 20. 
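+	// This value maps to the client_conn_throttle field on the underlying NodeBalancer.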
+ AnnLinodeThrottle = "service.beta.kubernetes.io/linode-loadbalancer-throttle" + + AnnLinodeLoadBalancerPreserve = "service.beta.kubernetes.io/linode-loadbalancer-preserve" + AnnLinodeNodeBalancerID = "service.beta.kubernetes.io/linode-loadbalancer-nodebalancer-id" + + AnnLinodeHostnameOnlyIngress = "service.beta.kubernetes.io/linode-loadbalancer-hostname-only-ingress" + AnnLinodeLoadBalancerTags = "service.beta.kubernetes.io/linode-loadbalancer-tags" + AnnLinodeCloudFirewallID = "service.beta.kubernetes.io/linode-loadbalancer-firewall-id" + AnnLinodeCloudFirewallACL = "service.beta.kubernetes.io/linode-loadbalancer-firewall-acl" + + AnnLinodeNodePrivateIP = "node.k8s.linode.com/private-ip" + AnnLinodeHostUUID = "node.k8s.linode.com/host-uuid" + + AnnLinodeNodeIPSharingUpdated = "node.k8s.linode.com/ip-sharing-updated" +) diff --git a/cloud/linode/annotations.go b/cloud/linode/annotations.go deleted file mode 100644 index 18c0f874..00000000 --- a/cloud/linode/annotations.go +++ /dev/null @@ -1,34 +0,0 @@ -package linode - -const ( - // annLinodeDefaultProtocol is the annotation used to specify the default protocol - // for Linode load balancers. Options are tcp, http and https. Defaults to tcp. - annLinodeDefaultProtocol = "service.beta.kubernetes.io/linode-loadbalancer-default-protocol" - annLinodePortConfigPrefix = "service.beta.kubernetes.io/linode-loadbalancer-port-" - annLinodeDefaultProxyProtocol = "service.beta.kubernetes.io/linode-loadbalancer-default-proxy-protocol" - - annLinodeCheckPath = "service.beta.kubernetes.io/linode-loadbalancer-check-path" - annLinodeCheckBody = "service.beta.kubernetes.io/linode-loadbalancer-check-body" - annLinodeHealthCheckType = "service.beta.kubernetes.io/linode-loadbalancer-check-type" - - annLinodeHealthCheckInterval = "service.beta.kubernetes.io/linode-loadbalancer-check-interval" - annLinodeHealthCheckTimeout = "service.beta.kubernetes.io/linode-loadbalancer-check-timeout" - annLinodeHealthCheckAttempts = "service.beta.kubernetes.io/linode-loadbalancer-check-attempts" - annLinodeHealthCheckPassive = "service.beta.kubernetes.io/linode-loadbalancer-check-passive" - - // annLinodeThrottle is the annotation specifying the value of the Client Connection - // Throttle, which limits the number of subsequent new connections per second from the - // same client IP. Options are a number between 1-20, or 0 to disable. Defaults to 20. 
- annLinodeThrottle = "service.beta.kubernetes.io/linode-loadbalancer-throttle" - - annLinodeLoadBalancerPreserve = "service.beta.kubernetes.io/linode-loadbalancer-preserve" - annLinodeNodeBalancerID = "service.beta.kubernetes.io/linode-loadbalancer-nodebalancer-id" - - annLinodeHostnameOnlyIngress = "service.beta.kubernetes.io/linode-loadbalancer-hostname-only-ingress" - annLinodeLoadBalancerTags = "service.beta.kubernetes.io/linode-loadbalancer-tags" - annLinodeCloudFirewallID = "service.beta.kubernetes.io/linode-loadbalancer-firewall-id" - annLinodeCloudFirewallACL = "service.beta.kubernetes.io/linode-loadbalancer-firewall-acl" - - annLinodeNodePrivateIP = "node.k8s.linode.com/private-ip" - annLinodeHostUUID = "node.k8s.linode.com/host-uuid" -) diff --git a/cloud/linode/cilium_loadbalancers.go b/cloud/linode/cilium_loadbalancers.go new file mode 100644 index 00000000..71dc5632 --- /dev/null +++ b/cloud/linode/cilium_loadbalancers.go @@ -0,0 +1,583 @@ +package linode + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + "os" + "slices" + "strings" + + "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1" + ciliumclient "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1" + slimv1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1" + "github.com/google/uuid" + "github.com/linode/linode-cloud-controller-manager/cloud/annotations" + "github.com/linode/linodego" + v1 "k8s.io/api/core/v1" + k8serrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" + "k8s.io/client-go/util/retry" + "k8s.io/klog/v2" + "k8s.io/utils/ptr" +) + +const ( + ciliumLBClass = "io.cilium/bgp-control-plane" + ipHolderLabelPrefix = "linode-ccm-ip-holder" + ciliumBGPPeeringPolicyName = "linode-ccm-bgp-peering" + defaultBGPPeerPrefix = "2600:3c0f" + commonControlPlaneLabel = "node-role.kubernetes.io/control-plane" +) + +// This mapping is unfortunately necessary since there is no way to get the +// numeric ID for a data center from the API. 
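+// (the /regions API endpoint returns string IDs such as "us-east", not these numeric IDs)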
+// These values come from https://www.linode.com/docs/products/compute/compute-instances/guides/failover/#ip-sharing-availability +var ( +	regionIDMap = map[string]int{ +		"nl-ams":       22, // Amsterdam (Netherlands) +		"us-southeast": 4,  // Atlanta, GA (USA) +		"in-maa":       25, // Chennai (India) +		"us-ord":       18, // Chicago, IL (USA) +		"us-central":   2,  // Dallas, TX (USA) +		"eu-central":   10, // Frankfurt (Germany) +		// "us-west": 3, // Fremont, CA (USA) Undergoing network upgrades +		"id-cgk":       29, // Jakarta (Indonesia) +		"eu-west":      7,  // London (United Kingdom) +		"gb-lon":       44, // London 2 (United Kingdom) +		"us-lax":       30, // Los Angeles, CA (USA) +		"es-mad":       24, // Madrid (Spain) +		"au-mel":       45, // Melbourne (Australia) +		"us-mia":       28, // Miami, FL (USA) +		"it-mil":       27, // Milan (Italy) +		"ap-west":      14, // Mumbai (India) +		"in-bom-2":     46, // Mumbai 2 (India) +		"us-east":      6,  // Newark, NJ (USA) +		"jp-osa":       26, // Osaka (Japan) +		"fr-par":       19, // Paris (France) +		"br-gru":       21, // São Paulo (Brazil) +		"us-sea":       20, // Seattle, WA (USA) +		"ap-south":     9,  // Singapore +		"sg-sin-2":     48, // Singapore 2 +		"se-sto":       23, // Stockholm (Sweden) +		"ap-southeast": 16, // Sydney (Australia) +		"ap-northeast": 11, // Tokyo (Japan) +		"ca-central":   15, // Toronto (Canada) +		"us-iad":       17, // Washington, DC (USA) +	} +) + +// getExistingSharedIPsInCluster determines the list of addresses to share on nodes by checking the +// CiliumLoadBalancerIPPools created by the CCM in createCiliumLBIPPool +// NOTE: Cilium CRDs must be installed for this to work +func (l *loadbalancers) getExistingSharedIPsInCluster(ctx context.Context) ([]string, error) { +	addrs := []string{} +	if err := l.retrieveCiliumClientset(); err != nil { +		return addrs, err +	} +	pools, err := l.ciliumClient.CiliumLoadBalancerIPPools().List(ctx, metav1.ListOptions{ +		LabelSelector: "app.kubernetes.io/managed-by=linode-ccm", +	}) +	if err != nil { +		return addrs, err +	} +	for _, pool := range pools.Items { +		for _, block := range pool.Spec.Blocks { +			addrs = append(addrs, strings.TrimSuffix(string(block.Cidr), "/32")) +		} +	} +	return addrs, nil +} + +func (l *loadbalancers) getExistingSharedIPs(ctx context.Context, ipHolder *linodego.Instance) ([]string, error) { +	if ipHolder == nil { +		return nil, nil +	} +	ipHolderAddrs, err := l.client.GetInstanceIPAddresses(ctx, ipHolder.ID) +	if err != nil { +		return nil, err +	} +	addrs := make([]string, 0, len(ipHolderAddrs.IPv4.Public)) +	for _, addr := range ipHolderAddrs.IPv4.Public { +		addrs = append(addrs, addr.Address) +	} +	return addrs, nil +} + +// shareIPs shares the given list of IP addresses on the given Node +func (l *loadbalancers) shareIPs(ctx context.Context, addrs []string, node *v1.Node) error { +	nodeLinodeID, err := parseProviderID(node.Spec.ProviderID) +	if err != nil { +		return err +	} +	if err = l.retrieveKubeClient(); err != nil { +		return err +	} +	if err = l.client.ShareIPAddresses(ctx, linodego.IPAddressesShareOptions{ +		IPs:      addrs, +		LinodeID: nodeLinodeID, +	}); err != nil { +		return err +	} +	// need to make sure node is up-to-date +	node, err = l.kubeClient.CoreV1().Nodes().Get(ctx, node.Name, metav1.GetOptions{}) +	if err != nil { +		return err +	} +	if node.Labels == nil { +		node.Labels = make(map[string]string) +	} +	node.Labels[annotations.AnnLinodeNodeIPSharingUpdated] = "true" +	retryErr := retry.RetryOnConflict(retry.DefaultRetry, func() error { +		_, err := l.kubeClient.CoreV1().Nodes().Update(ctx, node, metav1.UpdateOptions{}) +		return err +	}) +	if retryErr != nil { 
klog.Infof("could not update Node: %s", retryErr.Error()) + return retryErr + } + + klog.Infof("shared IPs %v on Linode %d", addrs, nodeLinodeID) + + return nil +} + +// handleIPSharing makes sure that the appropriate Nodes that are labeled to +// perform IP sharing (via a specified node selector) have the expected IPs shared +// in the event that a Node joins the cluster after the LoadBalancer Service already +// exists +func (l *loadbalancers) handleIPSharing(ctx context.Context, node *v1.Node, ipHolderSuffix string) error { + // ignore cases where the provider ID has been set + if node.Spec.ProviderID == "" { + klog.Info("skipping IP while providerID is unset") + return nil + } + // If performing Service load-balancing via IP sharing + BGP, check for a special annotation + // added by the CCM gets set when load-balancer IPs have been successfully shared on the node + if Options.BGPNodeSelector != "" { + kv := strings.Split(Options.BGPNodeSelector, "=") + // Check if node should be participating in IP sharing via the given selector + if val, ok := node.Labels[kv[0]]; !ok || len(kv) != 2 || val != kv[1] { + // not a selected Node + return nil + } + } else if _, ok := node.Labels[commonControlPlaneLabel]; ok { + // If there is no node selector specified, default to sharing across worker nodes only + return nil + } + // check if node has been updated with IPs to share + if _, foundIpSharingUpdatedLabel := node.Labels[annotations.AnnLinodeNodeIPSharingUpdated]; foundIpSharingUpdatedLabel { + // IPs are already shared on the Node + return nil + } + // Get the IPs to be shared on the Node and configure sharing. + // This also annotates the node that IPs have been shared. + inClusterAddrs, err := l.getExistingSharedIPsInCluster(ctx) + if err != nil { + klog.Infof("error getting shared IPs in cluster: %s", err.Error()) + return err + } + // if any of the addrs don't exist on the ip-holder (e.g. someone manually deleted it outside the CCM), + // we need to exclude that from the list + // TODO: also clean up the CiliumLoadBalancerIPPool for that missing IP if that happens + ipHolder, err := l.getIPHolder(ctx, ipHolderSuffix) + if err != nil { + return err + } + ipHolderAddrs, err := l.getExistingSharedIPs(ctx, ipHolder) + if err != nil { + klog.Infof("error getting shared IPs in cluster: %s", err.Error()) + return err + } + addrs := []string{} + for _, i := range inClusterAddrs { + if slices.Contains(ipHolderAddrs, i) { + addrs = append(addrs, i) + } + } + if err = l.shareIPs(ctx, addrs, node); err != nil { + klog.Infof("error sharing IPs: %s", err.Error()) + return err + } + + return nil +} + +// createSharedIP requests an additional IP that can be shared on Nodes to support +// loadbalancing via Cilium LB IPAM + BGP Control Plane. +func (l *loadbalancers) createSharedIP(ctx context.Context, nodes []*v1.Node, ipHolderSuffix string) (string, error) { + ipHolder, err := l.ensureIPHolder(ctx, ipHolderSuffix) + if err != nil { + return "", err + } + + newSharedIP, err := l.client.AddInstanceIPAddress(ctx, ipHolder.ID, true) + if err != nil { + return "", err + } + + // need to retrieve existing public IPs on the IP holder since ShareIPAddresses + // expects the full list of IPs to be shared + inClusterAddrs, err := l.getExistingSharedIPsInCluster(ctx) + if err != nil { + return "", err + } + // if any of the addrs don't exist on the ip-holder (e.g. 
someone manually deleted it outside the CCM),
+ // we need to exclude that from the list
+ // TODO: also clean up the CiliumLoadBalancerIPPool for that missing IP if that happens
+ ipHolderAddrs, err := l.getExistingSharedIPs(ctx, ipHolder)
+ if err != nil {
+ klog.Infof("error getting shared IPs on ip-holder: %s", err.Error())
+ return "", err
+ }
+ addrs := []string{newSharedIP.Address}
+ for _, i := range inClusterAddrs {
+ if slices.Contains(ipHolderAddrs, i) {
+ addrs = append(addrs, i)
+ }
+ }
+
+ // share the IPs with nodes participating in Cilium BGP peering
+ if Options.BGPNodeSelector == "" {
+ for _, node := range nodes {
+ if _, ok := node.Labels[commonControlPlaneLabel]; !ok {
+ if err = l.shareIPs(ctx, addrs, node); err != nil {
+ return "", err
+ }
+ }
+ }
+ } else {
+ kv := strings.Split(Options.BGPNodeSelector, "=")
+ for _, node := range nodes {
+ if val, ok := node.Labels[kv[0]]; ok && len(kv) == 2 && val == kv[1] {
+ if err = l.shareIPs(ctx, addrs, node); err != nil {
+ return "", err
+ }
+ }
+ }
+ }
+
+ return newSharedIP.Address, nil
+}
+
+// deleteSharedIP cleans up the shared IP for a LoadBalancer Service if it was assigned
+// by Cilium LB IPAM, removing it from the ip-holder
+func (l *loadbalancers) deleteSharedIP(ctx context.Context, service *v1.Service) error {
+ err := l.retrieveKubeClient()
+ if err != nil {
+ return err
+ }
+ nodeList, err := l.kubeClient.CoreV1().Nodes().List(ctx, metav1.ListOptions{
+ LabelSelector: Options.BGPNodeSelector,
+ })
+ if err != nil {
+ return err
+ }
+ bgpNodes := nodeList.Items
+
+ serviceNn := getServiceNn(service)
+ var ipHolderSuffix string
+ if Options.IpHolderSuffix != "" {
+ ipHolderSuffix = Options.IpHolderSuffix
+ klog.V(3).Infof("using parameter-based IP Holder suffix %s for Service %s", ipHolderSuffix, serviceNn)
+ }
+
+ ipHolder, err := l.getIPHolder(ctx, ipHolderSuffix)
+ if err != nil {
+ // return the error, or nil if the holder was not found, since no IP holder
+ // means there is no IP to reclaim
+ return IgnoreLinodeAPIError(err, http.StatusNotFound)
+ }
+ svcIngress := service.Status.LoadBalancer.Ingress
+ if len(svcIngress) > 0 && ipHolder != nil {
+ for _, ingress := range svcIngress {
+ // delete the shared IP on the Linodes it's shared on
+ for _, node := range bgpNodes {
+ nodeLinodeID, err := parseProviderID(node.Spec.ProviderID)
+ if err != nil {
+ return err
+ }
+ err = l.client.DeleteInstanceIPAddress(ctx, nodeLinodeID, ingress.IP)
+ if IgnoreLinodeAPIError(err, http.StatusNotFound) != nil {
+ return err
+ }
+ }
+
+ // finally delete the shared IP on the ip-holder
+ err = l.client.DeleteInstanceIPAddress(ctx, ipHolder.ID, ingress.IP)
+ if IgnoreLinodeAPIError(err, http.StatusNotFound) != nil {
+ return err
+ }
+ }
+ }
+
+ return nil
+}
+
+// In lieu of a proper IP reservation system, a special Nanode is created, but
+// never booted, and used to hold all shared IPs.
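+// The holder's label combines the region and the optional --ip-holder-suffix
+// (e.g. linode-ccm-ip-holder-us-ord-linodelb), so clusters sharing a region do
+// not collide when a suffix is set.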
+func (l *loadbalancers) ensureIPHolder(ctx context.Context, suffix string) (*linodego.Instance, error) {
+ ipHolder, err := l.getIPHolder(ctx, suffix)
+ if err != nil {
+ return nil, err
+ }
+ if ipHolder != nil {
+ return ipHolder, nil
+ }
+ label := generateClusterScopedIPHolderLinodeName(l.zone, suffix)
+ ipHolder, err = l.client.CreateInstance(ctx, linodego.InstanceCreateOptions{
+ Region: l.zone,
+ Type: "g6-nanode-1",
+ Label: label,
+ RootPass: uuid.NewString(),
+ Image: "linode/ubuntu22.04",
+ Booted: ptr.To(false),
+ })
+ if err != nil {
+ if linodego.ErrHasStatus(err, http.StatusBadRequest) && strings.Contains(err.Error(), "Label must be unique") {
+ // TODO (rk): should we handle more status codes on error?
+ klog.Errorf("failed to create new IP Holder instance %s since it already exists: %s", label, err.Error())
+ return nil, err
+ }
+ return nil, err
+ }
+ klog.Infof("created new IP Holder instance %s", label)
+
+ return ipHolder, nil
+}
+
+func (l *loadbalancers) getIPHolder(ctx context.Context, suffix string) (*linodego.Instance, error) {
+ // check the old zone-scoped naming convention first; keeping this lookup ensures
+ // backwards compatibility with clusters created before the convention changed
+ filter := map[string]string{"label": fmt.Sprintf("%s-%s", ipHolderLabelPrefix, l.zone)}
+ rawFilter, err := json.Marshal(filter)
+ if err != nil {
+ panic("this should not have failed")
+ }
+ var ipHolder *linodego.Instance
+ // TODO (rk): should we switch to using GET instead of LIST? we would be able to wrap logic around errors
+ linodes, err := l.client.ListInstances(ctx, linodego.NewListOptions(1, string(rawFilter)))
+ if err != nil {
+ return nil, err
+ }
+ if len(linodes) == 0 {
+ // a list that returns 0 results still has a 200/OK status code (no error),
+ // so we assume that either
+ // a) an ip holder instance does not exist yet
+ // or
+ // b) another cluster already owns the ip holder that uses the old naming convention
+ filter = map[string]string{"label": generateClusterScopedIPHolderLinodeName(l.zone, suffix)}
+ rawFilter, err = json.Marshal(filter)
+ if err != nil {
+ panic("this should not have failed")
+ }
+ linodes, err = l.client.ListInstances(ctx, linodego.NewListOptions(1, string(rawFilter)))
+ if err != nil {
+ return nil, err
+ }
+ }
+ if len(linodes) > 0 {
+ ipHolder = &linodes[0]
+ }
+ return ipHolder, nil
+}
+
+// generateClusterScopedIPHolderLinodeName attempts to generate a unique name for the IP Holder
+// instance used alongside Cilium LoadBalancers and Shared IPs for Kubernetes Services.
+// If the `--ip-holder-suffix` arg is passed when running Linode CCM, `suffix` is set to that value.
+func generateClusterScopedIPHolderLinodeName(zone, suffix string) (label string) {
+ // Linode CCM consumers are varied, so the suffix has to be something callers can
+ // provide directly, without relying on a specific product (ex. LKE) to expose
+ // specific metadata (ex. annotations, labels) to key off of.
+
+ if suffix == "" {
+ // this avoids a trailing hyphen if suffix is empty (ex. 
linode-ccm-ip-holder-us-ord-)
+ label = fmt.Sprintf("%s-%s", ipHolderLabelPrefix, zone)
+ } else {
+ label = fmt.Sprintf("%s-%s-%s", ipHolderLabelPrefix, zone, suffix)
+ }
+ klog.V(5).Infof("generated IP Holder Linode label: %s", label)
+ return label
+}
+
+func (l *loadbalancers) retrieveCiliumClientset() error {
+ if l.ciliumClient != nil {
+ return nil
+ }
+ var (
+ kubeConfig *rest.Config
+ err error
+ )
+ kubeconfigFlag := Options.KubeconfigFlag
+ if kubeconfigFlag == nil || kubeconfigFlag.Value.String() == "" {
+ kubeConfig, err = rest.InClusterConfig()
+ } else {
+ kubeConfig, err = clientcmd.BuildConfigFromFlags("", kubeconfigFlag.Value.String())
+ }
+ if err != nil {
+ return err
+ }
+ l.ciliumClient, err = ciliumclient.NewForConfig(kubeConfig)
+
+ return err
+}
+
+// for LoadBalancer Services not backed by a NodeBalancer, a CiliumLoadBalancerIPPool resource
+// will be created specifically for the Service with the requested shared IP
+// NOTE: Cilium CRDs must be installed for this to work
+func (l *loadbalancers) createCiliumLBIPPool(ctx context.Context, service *v1.Service, sharedIP string) (*v2alpha1.CiliumLoadBalancerIPPool, error) {
+ if err := l.retrieveCiliumClientset(); err != nil {
+ return nil, err
+ }
+ ciliumLBIPPool := &v2alpha1.CiliumLoadBalancerIPPool{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: fmt.Sprintf("%s-%s-pool", service.Namespace, service.Name),
+ Labels: map[string]string{"app.kubernetes.io/managed-by": "linode-ccm"},
+ },
+ Spec: v2alpha1.CiliumLoadBalancerIPPoolSpec{
+ ServiceSelector: &slimv1.LabelSelector{
+ MatchLabels: map[string]slimv1.MatchLabelsValue{
+ "io.kubernetes.service.namespace": service.Namespace,
+ "io.kubernetes.service.name": service.Name,
+ },
+ },
+ Blocks: []v2alpha1.CiliumLoadBalancerIPPoolIPBlock{{
+ Cidr: v2alpha1.IPv4orIPv6CIDR(fmt.Sprintf("%s/32", sharedIP)),
+ }},
+ Disabled: false,
+ },
+ }
+
+ return l.ciliumClient.CiliumLoadBalancerIPPools().Create(ctx, ciliumLBIPPool, metav1.CreateOptions{})
+}
+
+// NOTE: Cilium CRDs must be installed for this to work
+func (l *loadbalancers) deleteCiliumLBIPPool(ctx context.Context, service *v1.Service) error {
+ if err := l.retrieveCiliumClientset(); err != nil {
+ return err
+ }
+
+ return l.ciliumClient.CiliumLoadBalancerIPPools().Delete(
+ ctx,
+ fmt.Sprintf("%s-%s-pool", service.Namespace, service.Name),
+ metav1.DeleteOptions{},
+ )
+}
+
+// NOTE: Cilium CRDs must be installed for this to work
+func (l *loadbalancers) getCiliumLBIPPool(ctx context.Context, service *v1.Service) (*v2alpha1.CiliumLoadBalancerIPPool, error) {
+ if err := l.retrieveCiliumClientset(); err != nil {
+ return nil, err
+ }
+
+ return l.ciliumClient.CiliumLoadBalancerIPPools().Get(
+ ctx,
+ fmt.Sprintf("%s-%s-pool", service.Namespace, service.Name),
+ metav1.GetOptions{},
+ )
+}
+
+// NOTE: Cilium CRDs must be installed for this to work
+func (l *loadbalancers) ensureCiliumBGPPeeringPolicy(ctx context.Context) error {
+ if raw, ok := os.LookupEnv("BGP_CUSTOM_ID_MAP"); ok && raw != "" {
+ klog.Info("BGP_CUSTOM_ID_MAP env variable specified, using it instead of the default region map")
+ // NOTE: json.Unmarshal into a non-nil map keeps existing entries, so custom
+ // values are merged over the built-in regionIDMap rather than replacing it
+ if err := json.Unmarshal([]byte(raw), &regionIDMap); err != nil {
+ return err
+ }
+ }
+ regionID, ok := regionIDMap[l.zone]
+ if !ok {
+ return fmt.Errorf("unsupported region for BGP: %s", l.zone)
+ }
+ if err := l.retrieveCiliumClientset(); err != nil {
+ return err
+ }
+ // check if policy already exists
+ policy, err := l.ciliumClient.CiliumBGPPeeringPolicies().Get(ctx, ciliumBGPPeeringPolicyName, metav1.GetOptions{})
+ if err 
!= nil && !k8serrors.IsNotFound(err) { + klog.Infof("Failed to get CiliumBGPPeeringPolicy: %s", err.Error()) + return err + } + // if the CiliumBGPPeeringPolicy doesn't exist, it's not nil, just empty + if policy != nil && policy.Name != "" { + return nil + } + + // otherwise create it + var nodeSelector slimv1.LabelSelector + // If no BGPNodeSelector is specified, select all worker nodes. + if Options.BGPNodeSelector == "" { + nodeSelector = slimv1.LabelSelector{ + MatchExpressions: []slimv1.LabelSelectorRequirement{ + { + Key: commonControlPlaneLabel, + Operator: slimv1.LabelSelectorOpDoesNotExist, + }, + }, + } + } else { + kv := strings.Split(Options.BGPNodeSelector, "=") + if len(kv) != 2 { + return fmt.Errorf("invalid node selector %s", Options.BGPNodeSelector) + } + + nodeSelector = slimv1.LabelSelector{MatchLabels: map[string]string{kv[0]: kv[1]}} + } + + ciliumBGPPeeringPolicy := &v2alpha1.CiliumBGPPeeringPolicy{ + ObjectMeta: metav1.ObjectMeta{ + Name: ciliumBGPPeeringPolicyName, + }, + Spec: v2alpha1.CiliumBGPPeeringPolicySpec{ + NodeSelector: &nodeSelector, + VirtualRouters: []v2alpha1.CiliumBGPVirtualRouter{{ + LocalASN: 65001, + ExportPodCIDR: ptr.To(true), + ServiceSelector: &slimv1.LabelSelector{ + // By default, virtual routers will not announce any services. + // This selector makes it so all services within the cluster are announced. + // See https://docs.cilium.io/en/stable/network/bgp-control-plane/#service-announcements + // for more information. + MatchExpressions: []slimv1.LabelSelectorRequirement{{ + Key: "somekey", + Operator: slimv1.LabelSelectorOpNotIn, + Values: []string{"never-used-value"}, + }}, + }, + }}, + }, + } + bgpPeerPrefix := defaultBGPPeerPrefix + if raw, ok := os.LookupEnv("BGP_PEER_PREFIX"); ok && raw != "" { + klog.Info("BGP_PEER_PREFIX env variable specified, using it instead of the default bgpPeer prefix") + bgpPeerPrefix = raw + } + // As in https://github.com/linode/lelastic, there are 4 peers per DC + for i := 1; i <= 4; i++ { + neighbor := v2alpha1.CiliumBGPNeighbor{ + PeerAddress: fmt.Sprintf("%s:%d:34::%d/64", bgpPeerPrefix, regionID, i), + PeerASN: 65000, + EBGPMultihopTTL: ptr.To(int32(10)), + ConnectRetryTimeSeconds: ptr.To(int32(5)), + HoldTimeSeconds: ptr.To(int32(9)), + KeepAliveTimeSeconds: ptr.To(int32(3)), + AdvertisedPathAttributes: []v2alpha1.CiliumBGPPathAttributes{ + { + SelectorType: "CiliumLoadBalancerIPPool", + Communities: &v2alpha1.BGPCommunities{ + Standard: []v2alpha1.BGPStandardCommunity{"65000:1", "65000:2"}, + }, + }, + }, + } + ciliumBGPPeeringPolicy.Spec.VirtualRouters[0].Neighbors = append(ciliumBGPPeeringPolicy.Spec.VirtualRouters[0].Neighbors, neighbor) + } + + klog.Info("Creating CiliumBGPPeeringPolicy") + _, err = l.ciliumClient.CiliumBGPPeeringPolicies().Create(ctx, ciliumBGPPeeringPolicy, metav1.CreateOptions{}) + + return err +} diff --git a/cloud/linode/cilium_loadbalancers_test.go b/cloud/linode/cilium_loadbalancers_test.go new file mode 100644 index 00000000..f03bfaeb --- /dev/null +++ b/cloud/linode/cilium_loadbalancers_test.go @@ -0,0 +1,708 @@ +package linode + +import ( + "context" + "encoding/json" + "fmt" + "net" + "testing" + + k8sClient "github.com/cilium/cilium/pkg/k8s/client" + fakev2alpha1 "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/fake" + "github.com/golang/mock/gomock" + "github.com/linode/linode-cloud-controller-manager/cloud/linode/client/mocks" + "github.com/linode/linodego" + v1 "k8s.io/api/core/v1" + metav1 
"k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" +) + +var ( + zone = "us-ord" + nodes = []*v1.Node{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "node-1", + Labels: map[string]string{"cilium-bgp-peering": "true"}, + }, + Spec: v1.NodeSpec{ + ProviderID: fmt.Sprintf("%s%d", providerIDPrefix, 11111), + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "node-2", + Labels: map[string]string{"cilium-bgp-peering": "true"}, + }, + Spec: v1.NodeSpec{ + ProviderID: fmt.Sprintf("%s%d", providerIDPrefix, 22222), + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "node-3", + }, + Spec: v1.NodeSpec{ + ProviderID: fmt.Sprintf("%s%d", providerIDPrefix, 33333), + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "node-control", + Labels: map[string]string{ + commonControlPlaneLabel: "", + }, + }, + Spec: v1.NodeSpec{ + ProviderID: fmt.Sprintf("%s%d", providerIDPrefix, 44444), + }, + }, + } + additionalNodes = []*v1.Node{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "node-5", + Labels: map[string]string{"cilium-bgp-peering": "true"}, + }, + Spec: v1.NodeSpec{ + ProviderID: fmt.Sprintf("%s%d", providerIDPrefix, 55555), + }, + }, + } + publicIPv4 = net.ParseIP("45.76.101.25") + oldIpHolderInstance = linodego.Instance{ + ID: 12345, + Label: fmt.Sprintf("%s-%s", ipHolderLabelPrefix, zone), + Type: "g6-standard-1", + Region: "us-west", + IPv4: []*net.IP{&publicIPv4}, + } + newIpHolderInstance = linodego.Instance{} +) + +func TestCiliumCCMLoadBalancers(t *testing.T) { + testCases := []struct { + name string + f func(*testing.T, *mocks.MockClient) + }{ + { + name: "Create Cilium Load Balancer Without BGP Node Labels specified", + f: testNoBGPNodeLabel, + }, + { + name: "Create Cilium Load Balancer with unsupported region", + f: testUnsupportedRegion, + }, + { + name: "Create Cilium Load Balancer With explicit loadBalancerClass and existing IP holder nanode with old IP Holder naming convention", + f: testCreateWithExistingIPHolderWithOldIpHolderNamingConvention, + }, + { + name: "Create Cilium Load Balancer With explicit loadBalancerClass and existing IP holder nanode with new IP Holder naming convention", + f: testCreateWithExistingIPHolderWithNewIpHolderNamingConvention, + }, + { + name: "Create Cilium Load Balancer With explicit loadBalancerClass and existing IP holder nanode with new IP Holder naming convention and 63 char long suffix", + f: testCreateWithExistingIPHolderWithNewIpHolderNamingConventionUsingLongSuffix, + }, + { + name: "Create Cilium Load Balancer With no existing IP holder nanode and short suffix", + f: testCreateWithNoExistingIPHolderUsingShortSuffix, + }, + { + name: "Create Cilium Load Balancer With no existing IP holder nanode and no suffix", + f: testCreateWithNoExistingIPHolderUsingNoSuffix, + }, + { + name: "Create Cilium Load Balancer With no existing IP holder nanode and 63 char long suffix", + f: testCreateWithNoExistingIPHolderUsingLongSuffix, + }, + { + name: "Delete Cilium Load Balancer With Old IP Holder Naming Convention", + f: testEnsureCiliumLoadBalancerDeletedWithOldIpHolderNamingConvention, + }, + { + name: "Delete Cilium Load Balancer With New IP Holder Naming Convention", + f: testEnsureCiliumLoadBalancerDeletedWithNewIpHolderNamingConvention, + }, + { + name: "Add node to existing Cilium Load Balancer With Old IP Holder Naming Convention", + f: testCiliumUpdateLoadBalancerAddNodeWithOldIpHolderNamingConvention, + }, + { + name: "Add node to existing Cilium Load Balancer With New IP Holder Naming Convention", + f: 
testCiliumUpdateLoadBalancerAddNodeWithNewIpHolderNamingConvention, + }, + } + for _, tc := range testCases { + ctrl := gomock.NewController(t) + mc := mocks.NewMockClient(ctrl) + t.Run(tc.name, func(t *testing.T) { + defer ctrl.Finish() + tc.f(t, mc) + }) + } +} + +func createTestService() *v1.Service { + svc := &v1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: randString(), + Namespace: "test-ns", + UID: "foobar123", + }, + Spec: v1.ServiceSpec{ + Ports: []v1.ServicePort{ + { + Name: randString(), + Protocol: "TCP", + Port: int32(80), + NodePort: int32(30000), + }, + { + Name: randString(), + Protocol: "TCP", + Port: int32(8080), + NodePort: int32(30001), + }, + }, + }, + } + + return svc +} + +func addService(t *testing.T, kubeClient kubernetes.Interface, svc *v1.Service) { + _, err := kubeClient.CoreV1().Services(svc.Namespace).Create(context.TODO(), svc, metav1.CreateOptions{}) + if err != nil { + t.Fatalf("failed to add Service: %v", err) + } +} + +func addNodes(t *testing.T, kubeClient kubernetes.Interface, nodes []*v1.Node) { + for _, node := range nodes { + _, err := kubeClient.CoreV1().Nodes().Create(context.TODO(), node, metav1.CreateOptions{}) + if err != nil { + t.Fatalf("failed to add Node: %v", err) + } + } +} + +func createNewIpHolderInstance() linodego.Instance { + return linodego.Instance{ + ID: 123456, + Label: generateClusterScopedIPHolderLinodeName(zone, Options.IpHolderSuffix), + Type: "g6-standard-1", + Region: "us-west", + IPv4: []*net.IP{&publicIPv4}, + } +} + +func testNoBGPNodeLabel(t *testing.T, mc *mocks.MockClient) { + Options.BGPNodeSelector = "" + Options.IpHolderSuffix = "linodelb" + t.Setenv("BGP_PEER_PREFIX", "2600:3cef") + svc := createTestService() + newIpHolderInstance = createNewIpHolderInstance() + + kubeClient, _ := k8sClient.NewFakeClientset() + ciliumClient := &fakev2alpha1.FakeCiliumV2alpha1{Fake: &kubeClient.CiliumFakeClientset.Fake} + addService(t, kubeClient, svc) + addNodes(t, kubeClient, nodes) + lb := &loadbalancers{mc, zone, kubeClient, ciliumClient, ciliumLBType} + + filter := map[string]string{"label": fmt.Sprintf("%s-%s", ipHolderLabelPrefix, zone)} + rawFilter, _ := json.Marshal(filter) + mc.EXPECT().ListInstances(gomock.Any(), linodego.NewListOptions(1, string(rawFilter))).Times(1).Return([]linodego.Instance{}, nil) + filter = map[string]string{"label": generateClusterScopedIPHolderLinodeName(zone, Options.IpHolderSuffix)} + rawFilter, _ = json.Marshal(filter) + mc.EXPECT().ListInstances(gomock.Any(), linodego.NewListOptions(1, string(rawFilter))).Times(1).Return([]linodego.Instance{}, nil) + dummySharedIP := "45.76.101.26" + mc.EXPECT().CreateInstance(gomock.Any(), gomock.Any()).Times(1).Return(&newIpHolderInstance, nil) + mc.EXPECT().GetInstanceIPAddresses(gomock.Any(), newIpHolderInstance.ID).Times(1).Return(&linodego.InstanceIPAddressResponse{ + IPv4: &linodego.InstanceIPv4Response{ + Public: []*linodego.InstanceIP{{Address: publicIPv4.String()}, {Address: dummySharedIP}}, + }, + }, nil) + mc.EXPECT().AddInstanceIPAddress(gomock.Any(), newIpHolderInstance.ID, true).Times(1).Return(&linodego.InstanceIP{Address: dummySharedIP}, nil) + mc.EXPECT().ShareIPAddresses(gomock.Any(), linodego.IPAddressesShareOptions{ + IPs: []string{dummySharedIP}, + LinodeID: 11111, + }).Times(1) + mc.EXPECT().ShareIPAddresses(gomock.Any(), linodego.IPAddressesShareOptions{ + IPs: []string{dummySharedIP}, + LinodeID: 22222, + }).Times(1) + mc.EXPECT().ShareIPAddresses(gomock.Any(), linodego.IPAddressesShareOptions{ + IPs: []string{dummySharedIP}, + 
LinodeID: 33333, + }).Times(1) + + lbStatus, err := lb.EnsureLoadBalancer(context.TODO(), "linodelb", svc, nodes) + if err != nil { + t.Fatalf("expected a nil error, got %v", err) + } + if lbStatus == nil { + t.Fatal("expected non-nil lbStatus") + } +} + +func testUnsupportedRegion(t *testing.T, mc *mocks.MockClient) { + Options.BGPNodeSelector = "cilium-bgp-peering=true" + svc := createTestService() + + kubeClient, _ := k8sClient.NewFakeClientset() + ciliumClient := &fakev2alpha1.FakeCiliumV2alpha1{Fake: &kubeClient.CiliumFakeClientset.Fake} + addService(t, kubeClient, svc) + lb := &loadbalancers{mc, "us-foobar", kubeClient, ciliumClient, ciliumLBType} + + lbStatus, err := lb.EnsureLoadBalancer(context.TODO(), "linodelb", svc, nodes) + if err == nil { + t.Fatal("expected not nil error") + } + if lbStatus != nil { + t.Fatalf("expected a nil lbStatus, got %v", lbStatus) + } + + // Use BGP custom id map + t.Setenv("BGP_CUSTOM_ID_MAP", "{'us-foobar': 2}") + lb = &loadbalancers{mc, zone, kubeClient, ciliumClient, ciliumLBType} + lbStatus, err = lb.EnsureLoadBalancer(context.TODO(), "linodelb", svc, nodes) + if err == nil { + t.Fatal("expected not nil error") + } + if lbStatus != nil { + t.Fatalf("expected a nil lbStatus, got %v", lbStatus) + } +} + +func testCreateWithExistingIPHolderWithOldIpHolderNamingConvention(t *testing.T, mc *mocks.MockClient) { + Options.BGPNodeSelector = "cilium-bgp-peering=true" + svc := createTestService() + newIpHolderInstance = createNewIpHolderInstance() + + kubeClient, _ := k8sClient.NewFakeClientset() + ciliumClient := &fakev2alpha1.FakeCiliumV2alpha1{Fake: &kubeClient.CiliumFakeClientset.Fake} + addService(t, kubeClient, svc) + addNodes(t, kubeClient, nodes) + lb := &loadbalancers{mc, zone, kubeClient, ciliumClient, ciliumLBType} + + filter := map[string]string{"label": fmt.Sprintf("%s-%s", ipHolderLabelPrefix, zone)} + rawFilter, _ := json.Marshal(filter) + mc.EXPECT().ListInstances(gomock.Any(), linodego.NewListOptions(1, string(rawFilter))).Times(1).Return([]linodego.Instance{oldIpHolderInstance}, nil) + dummySharedIP := "45.76.101.26" + mc.EXPECT().AddInstanceIPAddress(gomock.Any(), oldIpHolderInstance.ID, true).Times(1).Return(&linodego.InstanceIP{Address: dummySharedIP}, nil) + mc.EXPECT().GetInstanceIPAddresses(gomock.Any(), oldIpHolderInstance.ID).Times(1).Return(&linodego.InstanceIPAddressResponse{ + IPv4: &linodego.InstanceIPv4Response{ + Public: []*linodego.InstanceIP{{Address: publicIPv4.String()}, {Address: dummySharedIP}}, + }, + }, nil) + mc.EXPECT().ShareIPAddresses(gomock.Any(), linodego.IPAddressesShareOptions{ + IPs: []string{dummySharedIP}, + LinodeID: 11111, + }).Times(1) + mc.EXPECT().ShareIPAddresses(gomock.Any(), linodego.IPAddressesShareOptions{ + IPs: []string{dummySharedIP}, + LinodeID: 22222, + }).Times(1) + + lbStatus, err := lb.EnsureLoadBalancer(context.TODO(), "linodelb", svc, nodes) + if err != nil { + t.Fatalf("expected a nil error, got %v", err) + } + if lbStatus == nil { + t.Fatal("expected non-nil lbStatus") + } +} + +func testCreateWithExistingIPHolderWithNewIpHolderNamingConvention(t *testing.T, mc *mocks.MockClient) { + Options.BGPNodeSelector = "cilium-bgp-peering=true" + Options.IpHolderSuffix = "linodelb" + svc := createTestService() + newIpHolderInstance = createNewIpHolderInstance() + + kubeClient, _ := k8sClient.NewFakeClientset() + ciliumClient := &fakev2alpha1.FakeCiliumV2alpha1{Fake: &kubeClient.CiliumFakeClientset.Fake} + addService(t, kubeClient, svc) + addNodes(t, kubeClient, nodes) + lb := &loadbalancers{mc, 
zone, kubeClient, ciliumClient, ciliumLBType} + + filter := map[string]string{"label": fmt.Sprintf("%s-%s", ipHolderLabelPrefix, zone)} + rawFilter, _ := json.Marshal(filter) + mc.EXPECT().ListInstances(gomock.Any(), linodego.NewListOptions(1, string(rawFilter))).Times(1).Return([]linodego.Instance{oldIpHolderInstance}, nil) + dummySharedIP := "45.76.101.26" + mc.EXPECT().AddInstanceIPAddress(gomock.Any(), oldIpHolderInstance.ID, true).Times(1).Return(&linodego.InstanceIP{Address: dummySharedIP}, nil) + mc.EXPECT().GetInstanceIPAddresses(gomock.Any(), oldIpHolderInstance.ID).Times(1).Return(&linodego.InstanceIPAddressResponse{ + IPv4: &linodego.InstanceIPv4Response{ + Public: []*linodego.InstanceIP{{Address: publicIPv4.String()}, {Address: dummySharedIP}}, + }, + }, nil) + mc.EXPECT().ShareIPAddresses(gomock.Any(), linodego.IPAddressesShareOptions{ + IPs: []string{dummySharedIP}, + LinodeID: 11111, + }).Times(1) + mc.EXPECT().ShareIPAddresses(gomock.Any(), linodego.IPAddressesShareOptions{ + IPs: []string{dummySharedIP}, + LinodeID: 22222, + }).Times(1) + + lbStatus, err := lb.EnsureLoadBalancer(context.TODO(), "linodelb", svc, nodes) + if err != nil { + t.Fatalf("expected a nil error, got %v", err) + } + if lbStatus == nil { + t.Fatal("expected non-nil lbStatus") + } +} + +func testCreateWithExistingIPHolderWithNewIpHolderNamingConventionUsingLongSuffix(t *testing.T, mc *mocks.MockClient) { + Options.BGPNodeSelector = "cilium-bgp-peering=true" + Options.IpHolderSuffix = "OaTJrRuufacHVougjwkpBpmstiqvswvBNEMWXsRYfMBTCkKIUTXpbGIcIbDWSQp" + svc := createTestService() + newIpHolderInstance = createNewIpHolderInstance() + + kubeClient, _ := k8sClient.NewFakeClientset() + ciliumClient := &fakev2alpha1.FakeCiliumV2alpha1{Fake: &kubeClient.CiliumFakeClientset.Fake} + addService(t, kubeClient, svc) + addNodes(t, kubeClient, nodes) + lb := &loadbalancers{mc, zone, kubeClient, ciliumClient, ciliumLBType} + + filter := map[string]string{"label": fmt.Sprintf("%s-%s", ipHolderLabelPrefix, zone)} + rawFilter, _ := json.Marshal(filter) + mc.EXPECT().ListInstances(gomock.Any(), linodego.NewListOptions(1, string(rawFilter))).Times(1).Return([]linodego.Instance{oldIpHolderInstance}, nil) + dummySharedIP := "45.76.101.26" + mc.EXPECT().AddInstanceIPAddress(gomock.Any(), oldIpHolderInstance.ID, true).Times(1).Return(&linodego.InstanceIP{Address: dummySharedIP}, nil) + mc.EXPECT().GetInstanceIPAddresses(gomock.Any(), oldIpHolderInstance.ID).Times(1).Return(&linodego.InstanceIPAddressResponse{ + IPv4: &linodego.InstanceIPv4Response{ + Public: []*linodego.InstanceIP{{Address: publicIPv4.String()}, {Address: dummySharedIP}}, + }, + }, nil) + mc.EXPECT().ShareIPAddresses(gomock.Any(), linodego.IPAddressesShareOptions{ + IPs: []string{dummySharedIP}, + LinodeID: 11111, + }).Times(1) + mc.EXPECT().ShareIPAddresses(gomock.Any(), linodego.IPAddressesShareOptions{ + IPs: []string{dummySharedIP}, + LinodeID: 22222, + }).Times(1) + + lbStatus, err := lb.EnsureLoadBalancer(context.TODO(), "linodelb", svc, nodes) + if err != nil { + t.Fatalf("expected a nil error, got %v", err) + } + if lbStatus == nil { + t.Fatal("expected non-nil lbStatus") + } +} + +func testCreateWithNoExistingIPHolderUsingNoSuffix(t *testing.T, mc *mocks.MockClient) { + Options.BGPNodeSelector = "cilium-bgp-peering=true" + Options.IpHolderSuffix = "" + svc := createTestService() + newIpHolderInstance = createNewIpHolderInstance() + + kubeClient, _ := k8sClient.NewFakeClientset() + ciliumClient := &fakev2alpha1.FakeCiliumV2alpha1{Fake: 
&kubeClient.CiliumFakeClientset.Fake} + addService(t, kubeClient, svc) + addNodes(t, kubeClient, nodes) + lb := &loadbalancers{mc, zone, kubeClient, ciliumClient, ciliumLBType} + + filter := map[string]string{"label": fmt.Sprintf("%s-%s", ipHolderLabelPrefix, zone)} + rawFilter, _ := json.Marshal(filter) + mc.EXPECT().ListInstances(gomock.Any(), linodego.NewListOptions(1, string(rawFilter))).Times(1).Return([]linodego.Instance{}, nil) + filter = map[string]string{"label": generateClusterScopedIPHolderLinodeName(zone, Options.IpHolderSuffix)} + rawFilter, _ = json.Marshal(filter) + mc.EXPECT().ListInstances(gomock.Any(), linodego.NewListOptions(1, string(rawFilter))).Times(1).Return([]linodego.Instance{}, nil) + dummySharedIP := "45.76.101.26" + mc.EXPECT().CreateInstance(gomock.Any(), gomock.Any()).Times(1).Return(&newIpHolderInstance, nil) + mc.EXPECT().GetInstanceIPAddresses(gomock.Any(), newIpHolderInstance.ID).Times(1).Return(&linodego.InstanceIPAddressResponse{ + IPv4: &linodego.InstanceIPv4Response{ + Public: []*linodego.InstanceIP{{Address: publicIPv4.String()}, {Address: dummySharedIP}}, + }, + }, nil) + mc.EXPECT().AddInstanceIPAddress(gomock.Any(), newIpHolderInstance.ID, true).Times(1).Return(&linodego.InstanceIP{Address: dummySharedIP}, nil) + mc.EXPECT().ShareIPAddresses(gomock.Any(), linodego.IPAddressesShareOptions{ + IPs: []string{dummySharedIP}, + LinodeID: 11111, + }).Times(1) + mc.EXPECT().ShareIPAddresses(gomock.Any(), linodego.IPAddressesShareOptions{ + IPs: []string{dummySharedIP}, + LinodeID: 22222, + }).Times(1) + + lbStatus, err := lb.EnsureLoadBalancer(context.TODO(), "linodelb", svc, nodes) + if err != nil { + t.Fatalf("expected a nil error, got %v", err) + } + if lbStatus == nil { + t.Fatal("expected non-nil lbStatus") + } +} + +func testCreateWithNoExistingIPHolderUsingShortSuffix(t *testing.T, mc *mocks.MockClient) { + Options.BGPNodeSelector = "cilium-bgp-peering=true" + Options.IpHolderSuffix = "linodelb" + svc := createTestService() + newIpHolderInstance = createNewIpHolderInstance() + + kubeClient, _ := k8sClient.NewFakeClientset() + ciliumClient := &fakev2alpha1.FakeCiliumV2alpha1{Fake: &kubeClient.CiliumFakeClientset.Fake} + addService(t, kubeClient, svc) + addNodes(t, kubeClient, nodes) + lb := &loadbalancers{mc, zone, kubeClient, ciliumClient, ciliumLBType} + + filter := map[string]string{"label": fmt.Sprintf("%s-%s", ipHolderLabelPrefix, zone)} + rawFilter, _ := json.Marshal(filter) + mc.EXPECT().ListInstances(gomock.Any(), linodego.NewListOptions(1, string(rawFilter))).Times(1).Return([]linodego.Instance{}, nil) + filter = map[string]string{"label": generateClusterScopedIPHolderLinodeName(zone, Options.IpHolderSuffix)} + rawFilter, _ = json.Marshal(filter) + mc.EXPECT().ListInstances(gomock.Any(), linodego.NewListOptions(1, string(rawFilter))).Times(1).Return([]linodego.Instance{}, nil) + dummySharedIP := "45.76.101.26" + mc.EXPECT().CreateInstance(gomock.Any(), gomock.Any()).Times(1).Return(&newIpHolderInstance, nil) + mc.EXPECT().GetInstanceIPAddresses(gomock.Any(), newIpHolderInstance.ID).Times(1).Return(&linodego.InstanceIPAddressResponse{ + IPv4: &linodego.InstanceIPv4Response{ + Public: []*linodego.InstanceIP{{Address: publicIPv4.String()}, {Address: dummySharedIP}}, + }, + }, nil) + mc.EXPECT().AddInstanceIPAddress(gomock.Any(), newIpHolderInstance.ID, true).Times(1).Return(&linodego.InstanceIP{Address: dummySharedIP}, nil) + mc.EXPECT().ShareIPAddresses(gomock.Any(), linodego.IPAddressesShareOptions{ + IPs: []string{dummySharedIP}, + 
LinodeID: 11111, + }).Times(1) + mc.EXPECT().ShareIPAddresses(gomock.Any(), linodego.IPAddressesShareOptions{ + IPs: []string{dummySharedIP}, + LinodeID: 22222, + }).Times(1) + + lbStatus, err := lb.EnsureLoadBalancer(context.TODO(), "linodelb", svc, nodes) + if err != nil { + t.Fatalf("expected a nil error, got %v", err) + } + if lbStatus == nil { + t.Fatal("expected non-nil lbStatus") + } +} + +func testCreateWithNoExistingIPHolderUsingLongSuffix(t *testing.T, mc *mocks.MockClient) { + Options.BGPNodeSelector = "cilium-bgp-peering=true" + Options.IpHolderSuffix = "OaTJrRuufacHVougjwkpBpmstiqvswvBNEMWXsRYfMBTCkKIUTXpbGIcIbDWSQp" + svc := createTestService() + newIpHolderInstance = createNewIpHolderInstance() + + kubeClient, _ := k8sClient.NewFakeClientset() + ciliumClient := &fakev2alpha1.FakeCiliumV2alpha1{Fake: &kubeClient.CiliumFakeClientset.Fake} + addService(t, kubeClient, svc) + addNodes(t, kubeClient, nodes) + lb := &loadbalancers{mc, zone, kubeClient, ciliumClient, ciliumLBType} + + filter := map[string]string{"label": fmt.Sprintf("%s-%s", ipHolderLabelPrefix, zone)} + rawFilter, _ := json.Marshal(filter) + mc.EXPECT().ListInstances(gomock.Any(), linodego.NewListOptions(1, string(rawFilter))).Times(1).Return([]linodego.Instance{}, nil) + filter = map[string]string{"label": generateClusterScopedIPHolderLinodeName(zone, Options.IpHolderSuffix)} + rawFilter, _ = json.Marshal(filter) + mc.EXPECT().ListInstances(gomock.Any(), linodego.NewListOptions(1, string(rawFilter))).Times(1).Return([]linodego.Instance{}, nil) + dummySharedIP := "45.76.101.26" + mc.EXPECT().CreateInstance(gomock.Any(), gomock.Any()).Times(1).Return(&newIpHolderInstance, nil) + mc.EXPECT().GetInstanceIPAddresses(gomock.Any(), newIpHolderInstance.ID).Times(1).Return(&linodego.InstanceIPAddressResponse{ + IPv4: &linodego.InstanceIPv4Response{ + Public: []*linodego.InstanceIP{{Address: publicIPv4.String()}, {Address: dummySharedIP}}, + }, + }, nil) + mc.EXPECT().AddInstanceIPAddress(gomock.Any(), newIpHolderInstance.ID, true).Times(1).Return(&linodego.InstanceIP{Address: dummySharedIP}, nil) + mc.EXPECT().ShareIPAddresses(gomock.Any(), linodego.IPAddressesShareOptions{ + IPs: []string{dummySharedIP}, + LinodeID: 11111, + }).Times(1) + mc.EXPECT().ShareIPAddresses(gomock.Any(), linodego.IPAddressesShareOptions{ + IPs: []string{dummySharedIP}, + LinodeID: 22222, + }).Times(1) + + lbStatus, err := lb.EnsureLoadBalancer(context.TODO(), "linodelb", svc, nodes) + if err != nil { + t.Fatalf("expected a nil error, got %v", err) + } + if lbStatus == nil { + t.Fatal("expected non-nil lbStatus") + } +} + +func testEnsureCiliumLoadBalancerDeletedWithOldIpHolderNamingConvention(t *testing.T, mc *mocks.MockClient) { + Options.BGPNodeSelector = "cilium-bgp-peering=true" + svc := createTestService() + + kubeClient, _ := k8sClient.NewFakeClientset() + ciliumClient := &fakev2alpha1.FakeCiliumV2alpha1{Fake: &kubeClient.CiliumFakeClientset.Fake} + addService(t, kubeClient, svc) + addNodes(t, kubeClient, nodes) + lb := &loadbalancers{mc, zone, kubeClient, ciliumClient, ciliumLBType} + + dummySharedIP := "45.76.101.26" + svc.Status.LoadBalancer = v1.LoadBalancerStatus{Ingress: []v1.LoadBalancerIngress{{IP: dummySharedIP}}} + + filter := map[string]string{"label": fmt.Sprintf("%s-%s", ipHolderLabelPrefix, zone)} + rawFilter, _ := json.Marshal(filter) + mc.EXPECT().ListInstances(gomock.Any(), linodego.NewListOptions(1, string(rawFilter))).Times(1).Return([]linodego.Instance{oldIpHolderInstance}, nil) + 
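// the shared IP must be removed from each BGP-selected node (11111, 22222)
+ // before finally being removed from the ip-holder itself
+ 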
mc.EXPECT().DeleteInstanceIPAddress(gomock.Any(), 11111, dummySharedIP).Times(1).Return(nil) + mc.EXPECT().DeleteInstanceIPAddress(gomock.Any(), 22222, dummySharedIP).Times(1).Return(nil) + mc.EXPECT().DeleteInstanceIPAddress(gomock.Any(), oldIpHolderInstance.ID, dummySharedIP).Times(1).Return(nil) + + err := lb.EnsureLoadBalancerDeleted(context.TODO(), "linodelb", svc) + if err != nil { + t.Fatalf("expected a nil error, got %v", err) + } +} + +func testEnsureCiliumLoadBalancerDeletedWithNewIpHolderNamingConvention(t *testing.T, mc *mocks.MockClient) { + Options.BGPNodeSelector = "cilium-bgp-peering=true" + Options.IpHolderSuffix = "linodelb" + svc := createTestService() + newIpHolderInstance = createNewIpHolderInstance() + + kubeClient, _ := k8sClient.NewFakeClientset() + ciliumClient := &fakev2alpha1.FakeCiliumV2alpha1{Fake: &kubeClient.CiliumFakeClientset.Fake} + addService(t, kubeClient, svc) + addNodes(t, kubeClient, nodes) + lb := &loadbalancers{mc, zone, kubeClient, ciliumClient, ciliumLBType} + + dummySharedIP := "45.76.101.26" + svc.Status.LoadBalancer = v1.LoadBalancerStatus{Ingress: []v1.LoadBalancerIngress{{IP: dummySharedIP}}} + + filter := map[string]string{"label": fmt.Sprintf("%s-%s", ipHolderLabelPrefix, zone)} + rawFilter, _ := json.Marshal(filter) + mc.EXPECT().ListInstances(gomock.Any(), linodego.NewListOptions(1, string(rawFilter))).Times(1).Return([]linodego.Instance{}, nil) + filter = map[string]string{"label": generateClusterScopedIPHolderLinodeName(zone, Options.IpHolderSuffix)} + rawFilter, _ = json.Marshal(filter) + mc.EXPECT().ListInstances(gomock.Any(), linodego.NewListOptions(1, string(rawFilter))).Times(1).Return([]linodego.Instance{newIpHolderInstance}, nil) + mc.EXPECT().DeleteInstanceIPAddress(gomock.Any(), 11111, dummySharedIP).Times(1).Return(nil) + mc.EXPECT().DeleteInstanceIPAddress(gomock.Any(), 22222, dummySharedIP).Times(1).Return(nil) + mc.EXPECT().DeleteInstanceIPAddress(gomock.Any(), newIpHolderInstance.ID, dummySharedIP).Times(1).Return(nil) + + err := lb.EnsureLoadBalancerDeleted(context.TODO(), "linodelb", svc) + if err != nil { + t.Fatalf("expected a nil error, got %v", err) + } +} + +func testCiliumUpdateLoadBalancerAddNodeWithOldIpHolderNamingConvention(t *testing.T, mc *mocks.MockClient) { + Options.BGPNodeSelector = "cilium-bgp-peering=true" + svc := createTestService() + + kubeClient, _ := k8sClient.NewFakeClientset() + ciliumClient := &fakev2alpha1.FakeCiliumV2alpha1{Fake: &kubeClient.CiliumFakeClientset.Fake} + addService(t, kubeClient, svc) + addNodes(t, kubeClient, nodes) + lb := &loadbalancers{mc, zone, kubeClient, ciliumClient, ciliumLBType} + + filter := map[string]string{"label": fmt.Sprintf("%s-%s", ipHolderLabelPrefix, zone)} + rawFilter, _ := json.Marshal(filter) + mc.EXPECT().ListInstances(gomock.Any(), linodego.NewListOptions(1, string(rawFilter))).Times(1).Return([]linodego.Instance{oldIpHolderInstance}, nil) + dummySharedIP := "45.76.101.26" + mc.EXPECT().AddInstanceIPAddress(gomock.Any(), oldIpHolderInstance.ID, true).Times(1).Return(&linodego.InstanceIP{Address: dummySharedIP}, nil) + mc.EXPECT().GetInstanceIPAddresses(gomock.Any(), oldIpHolderInstance.ID).Times(1).Return(&linodego.InstanceIPAddressResponse{ + IPv4: &linodego.InstanceIPv4Response{ + Public: []*linodego.InstanceIP{{Address: publicIPv4.String()}, {Address: dummySharedIP}}, + }, + }, nil) + mc.EXPECT().ShareIPAddresses(gomock.Any(), linodego.IPAddressesShareOptions{ + IPs: []string{dummySharedIP}, + LinodeID: 11111, + }).Times(1) + 
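// node-2 (22222) is the only other node matching the BGP node selector;
+ // node-3 and the control plane node are not expected to receive share calls
+ 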
mc.EXPECT().ShareIPAddresses(gomock.Any(), linodego.IPAddressesShareOptions{ + IPs: []string{dummySharedIP}, + LinodeID: 22222, + }).Times(1) + + lbStatus, err := lb.EnsureLoadBalancer(context.TODO(), "linodelb", svc, nodes) + if err != nil { + t.Fatalf("expected a nil error, got %v", err) + } + if lbStatus == nil { + t.Fatal("expected non-nil lbStatus") + } + + // Now add another node to the cluster and assert that it gets the shared IP + mc.EXPECT().ListInstances(gomock.Any(), linodego.NewListOptions(1, string(rawFilter))).Times(1).Return([]linodego.Instance{oldIpHolderInstance}, nil) + mc.EXPECT().GetInstanceIPAddresses(gomock.Any(), oldIpHolderInstance.ID).Times(1).Return(&linodego.InstanceIPAddressResponse{ + IPv4: &linodego.InstanceIPv4Response{ + Public: []*linodego.InstanceIP{{Address: publicIPv4.String()}, {Address: dummySharedIP}}, + }, + }, nil) + mc.EXPECT().ShareIPAddresses(gomock.Any(), linodego.IPAddressesShareOptions{ + IPs: []string{dummySharedIP}, + LinodeID: 55555, + }).Times(1) + addNodes(t, kubeClient, additionalNodes) + + err = lb.UpdateLoadBalancer(context.TODO(), "linodelb", svc, additionalNodes) + if err != nil { + t.Fatalf("expected a nil error, got %v", err) + } +} + +func testCiliumUpdateLoadBalancerAddNodeWithNewIpHolderNamingConvention(t *testing.T, mc *mocks.MockClient) { + Options.BGPNodeSelector = "cilium-bgp-peering=true" + Options.IpHolderSuffix = "linodelb" + svc := createTestService() + newIpHolderInstance = createNewIpHolderInstance() + + kubeClient, _ := k8sClient.NewFakeClientset() + ciliumClient := &fakev2alpha1.FakeCiliumV2alpha1{Fake: &kubeClient.CiliumFakeClientset.Fake} + addService(t, kubeClient, svc) + addNodes(t, kubeClient, nodes) + lb := &loadbalancers{mc, zone, kubeClient, ciliumClient, ciliumLBType} + + filter := map[string]string{"label": fmt.Sprintf("%s-%s", ipHolderLabelPrefix, zone)} + rawFilter, _ := json.Marshal(filter) + mc.EXPECT().ListInstances(gomock.Any(), linodego.NewListOptions(1, string(rawFilter))).Times(1).Return([]linodego.Instance{}, nil) + filter = map[string]string{"label": generateClusterScopedIPHolderLinodeName(zone, Options.IpHolderSuffix)} + rawFilter, _ = json.Marshal(filter) + mc.EXPECT().ListInstances(gomock.Any(), linodego.NewListOptions(1, string(rawFilter))).Times(1).Return([]linodego.Instance{newIpHolderInstance}, nil) + dummySharedIP := "45.76.101.26" + mc.EXPECT().AddInstanceIPAddress(gomock.Any(), newIpHolderInstance.ID, true).Times(1).Return(&linodego.InstanceIP{Address: dummySharedIP}, nil) + mc.EXPECT().GetInstanceIPAddresses(gomock.Any(), newIpHolderInstance.ID).Times(1).Return(&linodego.InstanceIPAddressResponse{ + IPv4: &linodego.InstanceIPv4Response{ + Public: []*linodego.InstanceIP{{Address: publicIPv4.String()}, {Address: dummySharedIP}}, + }, + }, nil) + mc.EXPECT().ShareIPAddresses(gomock.Any(), linodego.IPAddressesShareOptions{ + IPs: []string{dummySharedIP}, + LinodeID: 11111, + }).Times(1) + mc.EXPECT().ShareIPAddresses(gomock.Any(), linodego.IPAddressesShareOptions{ + IPs: []string{dummySharedIP}, + LinodeID: 22222, + }).Times(1) + + lbStatus, err := lb.EnsureLoadBalancer(context.TODO(), "linodelb", svc, nodes) + if err != nil { + t.Fatalf("expected a nil error, got %v", err) + } + if lbStatus == nil { + t.Fatal("expected non-nil lbStatus") + } + + // Now add another node to the cluster and assert that it gets the shared IP + filter = map[string]string{"label": fmt.Sprintf("%s-%s", ipHolderLabelPrefix, zone)} + rawFilter, _ = json.Marshal(filter) + mc.EXPECT().ListInstances(gomock.Any(), 
linodego.NewListOptions(1, string(rawFilter))).Times(1).Return([]linodego.Instance{}, nil) + filter = map[string]string{"label": generateClusterScopedIPHolderLinodeName(zone, Options.IpHolderSuffix)} + rawFilter, _ = json.Marshal(filter) + mc.EXPECT().ListInstances(gomock.Any(), linodego.NewListOptions(1, string(rawFilter))).Times(1).Return([]linodego.Instance{newIpHolderInstance}, nil) + + mc.EXPECT().GetInstanceIPAddresses(gomock.Any(), newIpHolderInstance.ID).Times(1).Return(&linodego.InstanceIPAddressResponse{ + IPv4: &linodego.InstanceIPv4Response{ + Public: []*linodego.InstanceIP{{Address: publicIPv4.String()}, {Address: dummySharedIP}}, + }, + }, nil) + mc.EXPECT().ShareIPAddresses(gomock.Any(), linodego.IPAddressesShareOptions{ + IPs: []string{dummySharedIP}, + LinodeID: 55555, + }).Times(1) + addNodes(t, kubeClient, additionalNodes) + + err = lb.UpdateLoadBalancer(context.TODO(), "linodelb", svc, additionalNodes) + if err != nil { + t.Fatalf("expected a nil error, got %v", err) + } +} diff --git a/cloud/linode/client.go b/cloud/linode/client.go deleted file mode 100644 index a2fcde09..00000000 --- a/cloud/linode/client.go +++ /dev/null @@ -1,70 +0,0 @@ -package linode - -//go:generate go run github.com/golang/mock/mockgen -destination mock_client_test.go -package linode github.com/linode/linode-cloud-controller-manager/cloud/linode Client - -import ( - "context" - "net/url" - "regexp" - "strings" - - "github.com/linode/linodego" -) - -type Client interface { - GetInstance(context.Context, int) (*linodego.Instance, error) - ListInstances(context.Context, *linodego.ListOptions) ([]linodego.Instance, error) - GetInstanceIPAddresses(context.Context, int) (*linodego.InstanceIPAddressResponse, error) - - CreateNodeBalancer(context.Context, linodego.NodeBalancerCreateOptions) (*linodego.NodeBalancer, error) - GetNodeBalancer(context.Context, int) (*linodego.NodeBalancer, error) - UpdateNodeBalancer(context.Context, int, linodego.NodeBalancerUpdateOptions) (*linodego.NodeBalancer, error) - DeleteNodeBalancer(context.Context, int) error - ListNodeBalancers(context.Context, *linodego.ListOptions) ([]linodego.NodeBalancer, error) - - CreateNodeBalancerConfig(context.Context, int, linodego.NodeBalancerConfigCreateOptions) (*linodego.NodeBalancerConfig, error) - DeleteNodeBalancerConfig(context.Context, int, int) error - ListNodeBalancerConfigs(context.Context, int, *linodego.ListOptions) ([]linodego.NodeBalancerConfig, error) - RebuildNodeBalancerConfig(context.Context, int, int, linodego.NodeBalancerConfigRebuildOptions) (*linodego.NodeBalancerConfig, error) - ListNodeBalancerFirewalls(ctx context.Context, nodebalancerID int, opts *linodego.ListOptions) ([]linodego.Firewall, error) - ListFirewallDevices(ctx context.Context, firewallID int, opts *linodego.ListOptions) ([]linodego.FirewallDevice, error) - DeleteFirewallDevice(ctx context.Context, firewallID, deviceID int) error - CreateFirewallDevice(ctx context.Context, firewallID int, opts linodego.FirewallDeviceCreateOptions) (*linodego.FirewallDevice, error) - CreateFirewall(ctx context.Context, opts linodego.FirewallCreateOptions) (*linodego.Firewall, error) - DeleteFirewall(ctx context.Context, fwid int) error - GetFirewall(context.Context, int) (*linodego.Firewall, error) - UpdateFirewallRules(context.Context, int, linodego.FirewallRuleSet) (*linodego.FirewallRuleSet, error) -} - -// linodego.Client implements Client -var _ Client = (*linodego.Client)(nil) - -func newLinodeClient(token, ua, apiURL string) (*linodego.Client, error) { - 
linodeClient := linodego.NewClient(nil) - linodeClient.SetUserAgent(ua) - linodeClient.SetToken(token) - - // Validate apiURL - parsedURL, err := url.Parse(apiURL) - if err != nil { - return nil, err - } - - validatedURL := &url.URL{ - Host: parsedURL.Host, - Scheme: parsedURL.Scheme, - } - - linodeClient.SetBaseURL(validatedURL.String()) - - version := "" - matches := regexp.MustCompile(`/v\d+`).FindAllString(parsedURL.Path, -1) - - if len(matches) > 0 { - version = strings.Trim(matches[len(matches)-1], "/") - } - - linodeClient.SetAPIVersion(version) - - return &linodeClient, nil -} diff --git a/cloud/linode/client/client.go b/cloud/linode/client/client.go new file mode 100644 index 00000000..6599839b --- /dev/null +++ b/cloud/linode/client/client.go @@ -0,0 +1,98 @@ +package client + +//go:generate go run github.com/golang/mock/mockgen -destination mocks/mock_client.go -package mocks github.com/linode/linode-cloud-controller-manager/cloud/linode/client Client +//go:generate go run github.com/hexdigest/gowrap/cmd/gowrap gen -g -p github.com/linode/linode-cloud-controller-manager/cloud/linode/client -i Client -t ../../../hack/templates/prometheus.go.gotpl -o client_with_metrics.go -l "" + +import ( + "context" + "errors" + "fmt" + "net/http" + "os" + "time" + + _ "github.com/hexdigest/gowrap" + "github.com/linode/linodego" + "k8s.io/klog/v2" +) + +const ( + // DefaultClientTimeout is the default timeout for a client Linode API call + DefaultClientTimeout = 120 * time.Second +) + +type Client interface { + GetInstance(context.Context, int) (*linodego.Instance, error) + ListInstances(context.Context, *linodego.ListOptions) ([]linodego.Instance, error) + CreateInstance(ctx context.Context, opts linodego.InstanceCreateOptions) (*linodego.Instance, error) + + GetInstanceIPAddresses(context.Context, int) (*linodego.InstanceIPAddressResponse, error) + AddInstanceIPAddress(ctx context.Context, linodeID int, public bool) (*linodego.InstanceIP, error) + DeleteInstanceIPAddress(ctx context.Context, linodeID int, ipAddress string) error + ShareIPAddresses(ctx context.Context, opts linodego.IPAddressesShareOptions) error + + UpdateInstanceConfigInterface(context.Context, int, int, int, linodego.InstanceConfigInterfaceUpdateOptions) (*linodego.InstanceConfigInterface, error) + + ListVPCs(context.Context, *linodego.ListOptions) ([]linodego.VPC, error) + ListVPCIPAddresses(context.Context, int, *linodego.ListOptions) ([]linodego.VPCIP, error) + + CreateNodeBalancer(context.Context, linodego.NodeBalancerCreateOptions) (*linodego.NodeBalancer, error) + GetNodeBalancer(context.Context, int) (*linodego.NodeBalancer, error) + UpdateNodeBalancer(context.Context, int, linodego.NodeBalancerUpdateOptions) (*linodego.NodeBalancer, error) + DeleteNodeBalancer(context.Context, int) error + ListNodeBalancers(context.Context, *linodego.ListOptions) ([]linodego.NodeBalancer, error) + ListNodeBalancerNodes(context.Context, int, int, *linodego.ListOptions) ([]linodego.NodeBalancerNode, error) + + CreateNodeBalancerConfig(context.Context, int, linodego.NodeBalancerConfigCreateOptions) (*linodego.NodeBalancerConfig, error) + DeleteNodeBalancerConfig(context.Context, int, int) error + ListNodeBalancerConfigs(context.Context, int, *linodego.ListOptions) ([]linodego.NodeBalancerConfig, error) + RebuildNodeBalancerConfig(context.Context, int, int, linodego.NodeBalancerConfigRebuildOptions) (*linodego.NodeBalancerConfig, error) + ListNodeBalancerFirewalls(ctx context.Context, nodebalancerID int, opts *linodego.ListOptions) 
([]linodego.Firewall, error)
+ ListFirewallDevices(ctx context.Context, firewallID int, opts *linodego.ListOptions) ([]linodego.FirewallDevice, error)
+ DeleteFirewallDevice(ctx context.Context, firewallID, deviceID int) error
+ CreateFirewallDevice(ctx context.Context, firewallID int, opts linodego.FirewallDeviceCreateOptions) (*linodego.FirewallDevice, error)
+ CreateFirewall(ctx context.Context, opts linodego.FirewallCreateOptions) (*linodego.Firewall, error)
+ DeleteFirewall(ctx context.Context, fwid int) error
+ GetFirewall(context.Context, int) (*linodego.Firewall, error)
+ UpdateFirewallRules(context.Context, int, linodego.FirewallRuleSet) (*linodego.FirewallRuleSet, error)
+
+ GetProfile(ctx context.Context) (*linodego.Profile, error)
+}
+
+// linodego.Client implements Client
+var _ Client = (*linodego.Client)(nil)
+
+// New creates a new linode client with the given token and timeout
+func New(token string, timeout time.Duration) (*linodego.Client, error) {
+ userAgent := fmt.Sprintf("linode-cloud-controller-manager %s", linodego.DefaultUserAgent)
+ apiURL := os.Getenv("LINODE_URL")
+
+ linodeClient := linodego.NewClient(&http.Client{Timeout: timeout})
+ client, err := linodeClient.UseURL(apiURL)
+ if err != nil {
+ return nil, err
+ }
+ client.SetUserAgent(userAgent)
+ client.SetToken(token)
+
+ klog.V(3).Infof("Linode client created with timeout of %v", timeout)
+ return client, nil
+}
+
+// CheckClientAuthenticated reports whether the client's token is valid by fetching
+// the profile; a 401 yields (false, nil), any other failure is returned as an error
+func CheckClientAuthenticated(ctx context.Context, client Client) (bool, error) {
+ _, err := client.GetProfile(ctx)
+ if err == nil {
+ return true, nil
+ }
+
+ var linodeErr *linodego.Error
+ if !errors.As(err, &linodeErr) {
+ return false, err
+ }
+
+ if linodego.ErrHasStatus(err, http.StatusUnauthorized) {
+ return false, nil
+ }
+
+ return false, err
+}
diff --git a/cloud/linode/client/client_with_metrics.go b/cloud/linode/client/client_with_metrics.go
new file mode 100644
index 00000000..d87e0bbd
--- /dev/null
+++ b/cloud/linode/client/client_with_metrics.go
@@ -0,0 +1,411 @@
+// Code generated by gowrap. DO NOT EDIT. 
+// template: ../../../hack/templates/prometheus.go.gotpl +// gowrap: http://github.com/hexdigest/gowrap + +package client + +import ( + "context" + + _ "github.com/hexdigest/gowrap" + "github.com/linode/linodego" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" +) + +// ClientWithPrometheus implements Client interface with all methods wrapped +// with Prometheus counters +type ClientWithPrometheus struct { + base Client +} + +var ClientMethodCounterVec = promauto.NewCounterVec( + prometheus.CounterOpts{ + Name: "ccm_linode_client_requests_total", + Help: "client counters for each operation and its result", + }, + []string{"method", "result"}) + +// NewClientWithPrometheus returns an instance of the Client decorated with prometheus metrics +func NewClientWithPrometheus(base Client) ClientWithPrometheus { + return ClientWithPrometheus{ + base: base, + } +} + +// AddInstanceIPAddress implements Client +func (_d ClientWithPrometheus) AddInstanceIPAddress(ctx context.Context, linodeID int, public bool) (ip1 *linodego.InstanceIP, err error) { + defer func() { + result := "ok" + if err != nil { + result = "error" + } + + ClientMethodCounterVec.WithLabelValues("AddInstanceIPAddress", result).Inc() + }() + return _d.base.AddInstanceIPAddress(ctx, linodeID, public) +} + +// CreateFirewall implements Client +func (_d ClientWithPrometheus) CreateFirewall(ctx context.Context, opts linodego.FirewallCreateOptions) (fp1 *linodego.Firewall, err error) { + defer func() { + result := "ok" + if err != nil { + result = "error" + } + + ClientMethodCounterVec.WithLabelValues("CreateFirewall", result).Inc() + }() + return _d.base.CreateFirewall(ctx, opts) +} + +// CreateFirewallDevice implements Client +func (_d ClientWithPrometheus) CreateFirewallDevice(ctx context.Context, firewallID int, opts linodego.FirewallDeviceCreateOptions) (fp1 *linodego.FirewallDevice, err error) { + defer func() { + result := "ok" + if err != nil { + result = "error" + } + + ClientMethodCounterVec.WithLabelValues("CreateFirewallDevice", result).Inc() + }() + return _d.base.CreateFirewallDevice(ctx, firewallID, opts) +} + +// CreateInstance implements Client +func (_d ClientWithPrometheus) CreateInstance(ctx context.Context, opts linodego.InstanceCreateOptions) (ip1 *linodego.Instance, err error) { + defer func() { + result := "ok" + if err != nil { + result = "error" + } + + ClientMethodCounterVec.WithLabelValues("CreateInstance", result).Inc() + }() + return _d.base.CreateInstance(ctx, opts) +} + +// CreateNodeBalancer implements Client +func (_d ClientWithPrometheus) CreateNodeBalancer(ctx context.Context, n1 linodego.NodeBalancerCreateOptions) (np1 *linodego.NodeBalancer, err error) { + defer func() { + result := "ok" + if err != nil { + result = "error" + } + + ClientMethodCounterVec.WithLabelValues("CreateNodeBalancer", result).Inc() + }() + return _d.base.CreateNodeBalancer(ctx, n1) +} + +// CreateNodeBalancerConfig implements Client +func (_d ClientWithPrometheus) CreateNodeBalancerConfig(ctx context.Context, i1 int, n1 linodego.NodeBalancerConfigCreateOptions) (np1 *linodego.NodeBalancerConfig, err error) { + defer func() { + result := "ok" + if err != nil { + result = "error" + } + + ClientMethodCounterVec.WithLabelValues("CreateNodeBalancerConfig", result).Inc() + }() + return _d.base.CreateNodeBalancerConfig(ctx, i1, n1) +} + +// DeleteFirewall implements Client +func (_d ClientWithPrometheus) DeleteFirewall(ctx context.Context, fwid int) (err error) { + defer 
func() { + result := "ok" + if err != nil { + result = "error" + } + + ClientMethodCounterVec.WithLabelValues("DeleteFirewall", result).Inc() + }() + return _d.base.DeleteFirewall(ctx, fwid) +} + +// DeleteFirewallDevice implements Client +func (_d ClientWithPrometheus) DeleteFirewallDevice(ctx context.Context, firewallID int, deviceID int) (err error) { + defer func() { + result := "ok" + if err != nil { + result = "error" + } + + ClientMethodCounterVec.WithLabelValues("DeleteFirewallDevice", result).Inc() + }() + return _d.base.DeleteFirewallDevice(ctx, firewallID, deviceID) +} + +// DeleteInstanceIPAddress implements Client +func (_d ClientWithPrometheus) DeleteInstanceIPAddress(ctx context.Context, linodeID int, ipAddress string) (err error) { + defer func() { + result := "ok" + if err != nil { + result = "error" + } + + ClientMethodCounterVec.WithLabelValues("DeleteInstanceIPAddress", result).Inc() + }() + return _d.base.DeleteInstanceIPAddress(ctx, linodeID, ipAddress) +} + +// DeleteNodeBalancer implements Client +func (_d ClientWithPrometheus) DeleteNodeBalancer(ctx context.Context, i1 int) (err error) { + defer func() { + result := "ok" + if err != nil { + result = "error" + } + + ClientMethodCounterVec.WithLabelValues("DeleteNodeBalancer", result).Inc() + }() + return _d.base.DeleteNodeBalancer(ctx, i1) +} + +// DeleteNodeBalancerConfig implements Client +func (_d ClientWithPrometheus) DeleteNodeBalancerConfig(ctx context.Context, i1 int, i2 int) (err error) { + defer func() { + result := "ok" + if err != nil { + result = "error" + } + + ClientMethodCounterVec.WithLabelValues("DeleteNodeBalancerConfig", result).Inc() + }() + return _d.base.DeleteNodeBalancerConfig(ctx, i1, i2) +} + +// GetFirewall implements Client +func (_d ClientWithPrometheus) GetFirewall(ctx context.Context, i1 int) (fp1 *linodego.Firewall, err error) { + defer func() { + result := "ok" + if err != nil { + result = "error" + } + + ClientMethodCounterVec.WithLabelValues("GetFirewall", result).Inc() + }() + return _d.base.GetFirewall(ctx, i1) +} + +// GetInstance implements Client +func (_d ClientWithPrometheus) GetInstance(ctx context.Context, i1 int) (ip1 *linodego.Instance, err error) { + defer func() { + result := "ok" + if err != nil { + result = "error" + } + + ClientMethodCounterVec.WithLabelValues("GetInstance", result).Inc() + }() + return _d.base.GetInstance(ctx, i1) +} + +// GetInstanceIPAddresses implements Client +func (_d ClientWithPrometheus) GetInstanceIPAddresses(ctx context.Context, i1 int) (ip1 *linodego.InstanceIPAddressResponse, err error) { + defer func() { + result := "ok" + if err != nil { + result = "error" + } + + ClientMethodCounterVec.WithLabelValues("GetInstanceIPAddresses", result).Inc() + }() + return _d.base.GetInstanceIPAddresses(ctx, i1) +} + +// GetNodeBalancer implements Client +func (_d ClientWithPrometheus) GetNodeBalancer(ctx context.Context, i1 int) (np1 *linodego.NodeBalancer, err error) { + defer func() { + result := "ok" + if err != nil { + result = "error" + } + + ClientMethodCounterVec.WithLabelValues("GetNodeBalancer", result).Inc() + }() + return _d.base.GetNodeBalancer(ctx, i1) +} + +// GetProfile implements Client +func (_d ClientWithPrometheus) GetProfile(ctx context.Context) (pp1 *linodego.Profile, err error) { + defer func() { + result := "ok" + if err != nil { + result = "error" + } + + ClientMethodCounterVec.WithLabelValues("GetProfile", result).Inc() + }() + return _d.base.GetProfile(ctx) +} + +// ListFirewallDevices implements Client +func (_d 
ClientWithPrometheus) ListFirewallDevices(ctx context.Context, firewallID int, opts *linodego.ListOptions) (fa1 []linodego.FirewallDevice, err error) { + defer func() { + result := "ok" + if err != nil { + result = "error" + } + + ClientMethodCounterVec.WithLabelValues("ListFirewallDevices", result).Inc() + }() + return _d.base.ListFirewallDevices(ctx, firewallID, opts) +} + +// ListInstances implements Client +func (_d ClientWithPrometheus) ListInstances(ctx context.Context, lp1 *linodego.ListOptions) (ia1 []linodego.Instance, err error) { + defer func() { + result := "ok" + if err != nil { + result = "error" + } + + ClientMethodCounterVec.WithLabelValues("ListInstances", result).Inc() + }() + return _d.base.ListInstances(ctx, lp1) +} + +// ListNodeBalancerConfigs implements Client +func (_d ClientWithPrometheus) ListNodeBalancerConfigs(ctx context.Context, i1 int, lp1 *linodego.ListOptions) (na1 []linodego.NodeBalancerConfig, err error) { + defer func() { + result := "ok" + if err != nil { + result = "error" + } + + ClientMethodCounterVec.WithLabelValues("ListNodeBalancerConfigs", result).Inc() + }() + return _d.base.ListNodeBalancerConfigs(ctx, i1, lp1) +} + +// ListNodeBalancerFirewalls implements Client +func (_d ClientWithPrometheus) ListNodeBalancerFirewalls(ctx context.Context, nodebalancerID int, opts *linodego.ListOptions) (fa1 []linodego.Firewall, err error) { + defer func() { + result := "ok" + if err != nil { + result = "error" + } + + ClientMethodCounterVec.WithLabelValues("ListNodeBalancerFirewalls", result).Inc() + }() + return _d.base.ListNodeBalancerFirewalls(ctx, nodebalancerID, opts) +} + +// ListNodeBalancerNodes implements Client +func (_d ClientWithPrometheus) ListNodeBalancerNodes(ctx context.Context, i1 int, i2 int, lp1 *linodego.ListOptions) (na1 []linodego.NodeBalancerNode, err error) { + defer func() { + result := "ok" + if err != nil { + result = "error" + } + + ClientMethodCounterVec.WithLabelValues("ListNodeBalancerNodes", result).Inc() + }() + return _d.base.ListNodeBalancerNodes(ctx, i1, i2, lp1) +} + +// ListNodeBalancers implements Client +func (_d ClientWithPrometheus) ListNodeBalancers(ctx context.Context, lp1 *linodego.ListOptions) (na1 []linodego.NodeBalancer, err error) { + defer func() { + result := "ok" + if err != nil { + result = "error" + } + + ClientMethodCounterVec.WithLabelValues("ListNodeBalancers", result).Inc() + }() + return _d.base.ListNodeBalancers(ctx, lp1) +} + +// ListVPCIPAddresses implements Client +func (_d ClientWithPrometheus) ListVPCIPAddresses(ctx context.Context, i1 int, lp1 *linodego.ListOptions) (va1 []linodego.VPCIP, err error) { + defer func() { + result := "ok" + if err != nil { + result = "error" + } + + ClientMethodCounterVec.WithLabelValues("ListVPCIPAddresses", result).Inc() + }() + return _d.base.ListVPCIPAddresses(ctx, i1, lp1) +} + +// ListVPCs implements Client +func (_d ClientWithPrometheus) ListVPCs(ctx context.Context, lp1 *linodego.ListOptions) (va1 []linodego.VPC, err error) { + defer func() { + result := "ok" + if err != nil { + result = "error" + } + + ClientMethodCounterVec.WithLabelValues("ListVPCs", result).Inc() + }() + return _d.base.ListVPCs(ctx, lp1) +} + +// RebuildNodeBalancerConfig implements Client +func (_d ClientWithPrometheus) RebuildNodeBalancerConfig(ctx context.Context, i1 int, i2 int, n1 linodego.NodeBalancerConfigRebuildOptions) (np1 *linodego.NodeBalancerConfig, err error) { + defer func() { + result := "ok" + if err != nil { + result = "error" + } + + 
ClientMethodCounterVec.WithLabelValues("RebuildNodeBalancerConfig", result).Inc() + }() + return _d.base.RebuildNodeBalancerConfig(ctx, i1, i2, n1) +} + +// ShareIPAddresses implements Client +func (_d ClientWithPrometheus) ShareIPAddresses(ctx context.Context, opts linodego.IPAddressesShareOptions) (err error) { + defer func() { + result := "ok" + if err != nil { + result = "error" + } + + ClientMethodCounterVec.WithLabelValues("ShareIPAddresses", result).Inc() + }() + return _d.base.ShareIPAddresses(ctx, opts) +} + +// UpdateFirewallRules implements Client +func (_d ClientWithPrometheus) UpdateFirewallRules(ctx context.Context, i1 int, f1 linodego.FirewallRuleSet) (fp1 *linodego.FirewallRuleSet, err error) { + defer func() { + result := "ok" + if err != nil { + result = "error" + } + + ClientMethodCounterVec.WithLabelValues("UpdateFirewallRules", result).Inc() + }() + return _d.base.UpdateFirewallRules(ctx, i1, f1) +} + +// UpdateInstanceConfigInterface implements Client +func (_d ClientWithPrometheus) UpdateInstanceConfigInterface(ctx context.Context, i1 int, i2 int, i3 int, i4 linodego.InstanceConfigInterfaceUpdateOptions) (ip1 *linodego.InstanceConfigInterface, err error) { + defer func() { + result := "ok" + if err != nil { + result = "error" + } + + ClientMethodCounterVec.WithLabelValues("UpdateInstanceConfigInterface", result).Inc() + }() + return _d.base.UpdateInstanceConfigInterface(ctx, i1, i2, i3, i4) +} + +// UpdateNodeBalancer implements Client +func (_d ClientWithPrometheus) UpdateNodeBalancer(ctx context.Context, i1 int, n1 linodego.NodeBalancerUpdateOptions) (np1 *linodego.NodeBalancer, err error) { + defer func() { + result := "ok" + if err != nil { + result = "error" + } + + ClientMethodCounterVec.WithLabelValues("UpdateNodeBalancer", result).Inc() + }() + return _d.base.UpdateNodeBalancer(ctx, i1, n1) +} diff --git a/cloud/linode/mock_client_test.go b/cloud/linode/client/mocks/mock_client.go similarity index 70% rename from cloud/linode/mock_client_test.go rename to cloud/linode/client/mocks/mock_client.go index d7f5b984..c986aef2 100644 --- a/cloud/linode/mock_client_test.go +++ b/cloud/linode/client/mocks/mock_client.go @@ -1,8 +1,8 @@ // Code generated by MockGen. DO NOT EDIT. -// Source: github.com/linode/linode-cloud-controller-manager/cloud/linode (interfaces: Client) +// Source: github.com/linode/linode-cloud-controller-manager/cloud/linode/client (interfaces: Client) -// Package linode is a generated GoMock package. -package linode +// Package mocks is a generated GoMock package. +package mocks import ( context "context" @@ -35,6 +35,21 @@ func (m *MockClient) EXPECT() *MockClientMockRecorder { return m.recorder } +// AddInstanceIPAddress mocks base method. +func (m *MockClient) AddInstanceIPAddress(arg0 context.Context, arg1 int, arg2 bool) (*linodego.InstanceIP, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "AddInstanceIPAddress", arg0, arg1, arg2) + ret0, _ := ret[0].(*linodego.InstanceIP) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// AddInstanceIPAddress indicates an expected call of AddInstanceIPAddress. +func (mr *MockClientMockRecorder) AddInstanceIPAddress(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddInstanceIPAddress", reflect.TypeOf((*MockClient)(nil).AddInstanceIPAddress), arg0, arg1, arg2) +} + // CreateFirewall mocks base method. 
func (m *MockClient) CreateFirewall(arg0 context.Context, arg1 linodego.FirewallCreateOptions) (*linodego.Firewall, error) { m.ctrl.T.Helper() @@ -65,6 +80,21 @@ func (mr *MockClientMockRecorder) CreateFirewallDevice(arg0, arg1, arg2 interfac return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateFirewallDevice", reflect.TypeOf((*MockClient)(nil).CreateFirewallDevice), arg0, arg1, arg2) } +// CreateInstance mocks base method. +func (m *MockClient) CreateInstance(arg0 context.Context, arg1 linodego.InstanceCreateOptions) (*linodego.Instance, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CreateInstance", arg0, arg1) + ret0, _ := ret[0].(*linodego.Instance) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CreateInstance indicates an expected call of CreateInstance. +func (mr *MockClientMockRecorder) CreateInstance(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateInstance", reflect.TypeOf((*MockClient)(nil).CreateInstance), arg0, arg1) +} + // CreateNodeBalancer mocks base method. func (m *MockClient) CreateNodeBalancer(arg0 context.Context, arg1 linodego.NodeBalancerCreateOptions) (*linodego.NodeBalancer, error) { m.ctrl.T.Helper() @@ -123,6 +153,20 @@ func (mr *MockClientMockRecorder) DeleteFirewallDevice(arg0, arg1, arg2 interfac return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteFirewallDevice", reflect.TypeOf((*MockClient)(nil).DeleteFirewallDevice), arg0, arg1, arg2) } +// DeleteInstanceIPAddress mocks base method. +func (m *MockClient) DeleteInstanceIPAddress(arg0 context.Context, arg1 int, arg2 string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteInstanceIPAddress", arg0, arg1, arg2) + ret0, _ := ret[0].(error) + return ret0 +} + +// DeleteInstanceIPAddress indicates an expected call of DeleteInstanceIPAddress. +func (mr *MockClientMockRecorder) DeleteInstanceIPAddress(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteInstanceIPAddress", reflect.TypeOf((*MockClient)(nil).DeleteInstanceIPAddress), arg0, arg1, arg2) +} + // DeleteNodeBalancer mocks base method. func (m *MockClient) DeleteNodeBalancer(arg0 context.Context, arg1 int) error { m.ctrl.T.Helper() @@ -211,6 +255,21 @@ func (mr *MockClientMockRecorder) GetNodeBalancer(arg0, arg1 interface{}) *gomoc return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetNodeBalancer", reflect.TypeOf((*MockClient)(nil).GetNodeBalancer), arg0, arg1) } +// GetProfile mocks base method. +func (m *MockClient) GetProfile(arg0 context.Context) (*linodego.Profile, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetProfile", arg0) + ret0, _ := ret[0].(*linodego.Profile) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetProfile indicates an expected call of GetProfile. +func (mr *MockClientMockRecorder) GetProfile(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetProfile", reflect.TypeOf((*MockClient)(nil).GetProfile), arg0) +} + // ListFirewallDevices mocks base method. 
func (m *MockClient) ListFirewallDevices(arg0 context.Context, arg1 int, arg2 *linodego.ListOptions) ([]linodego.FirewallDevice, error) { m.ctrl.T.Helper() @@ -271,6 +330,21 @@ func (mr *MockClientMockRecorder) ListNodeBalancerFirewalls(arg0, arg1, arg2 int return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListNodeBalancerFirewalls", reflect.TypeOf((*MockClient)(nil).ListNodeBalancerFirewalls), arg0, arg1, arg2) } +// ListNodeBalancerNodes mocks base method. +func (m *MockClient) ListNodeBalancerNodes(arg0 context.Context, arg1, arg2 int, arg3 *linodego.ListOptions) ([]linodego.NodeBalancerNode, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListNodeBalancerNodes", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].([]linodego.NodeBalancerNode) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListNodeBalancerNodes indicates an expected call of ListNodeBalancerNodes. +func (mr *MockClientMockRecorder) ListNodeBalancerNodes(arg0, arg1, arg2, arg3 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListNodeBalancerNodes", reflect.TypeOf((*MockClient)(nil).ListNodeBalancerNodes), arg0, arg1, arg2, arg3) +} + // ListNodeBalancers mocks base method. func (m *MockClient) ListNodeBalancers(arg0 context.Context, arg1 *linodego.ListOptions) ([]linodego.NodeBalancer, error) { m.ctrl.T.Helper() @@ -286,6 +360,36 @@ func (mr *MockClientMockRecorder) ListNodeBalancers(arg0, arg1 interface{}) *gom return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListNodeBalancers", reflect.TypeOf((*MockClient)(nil).ListNodeBalancers), arg0, arg1) } +// ListVPCIPAddresses mocks base method. +func (m *MockClient) ListVPCIPAddresses(arg0 context.Context, arg1 int, arg2 *linodego.ListOptions) ([]linodego.VPCIP, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListVPCIPAddresses", arg0, arg1, arg2) + ret0, _ := ret[0].([]linodego.VPCIP) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListVPCIPAddresses indicates an expected call of ListVPCIPAddresses. +func (mr *MockClientMockRecorder) ListVPCIPAddresses(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListVPCIPAddresses", reflect.TypeOf((*MockClient)(nil).ListVPCIPAddresses), arg0, arg1, arg2) +} + +// ListVPCs mocks base method. +func (m *MockClient) ListVPCs(arg0 context.Context, arg1 *linodego.ListOptions) ([]linodego.VPC, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListVPCs", arg0, arg1) + ret0, _ := ret[0].([]linodego.VPC) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListVPCs indicates an expected call of ListVPCs. +func (mr *MockClientMockRecorder) ListVPCs(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListVPCs", reflect.TypeOf((*MockClient)(nil).ListVPCs), arg0, arg1) +} + // RebuildNodeBalancerConfig mocks base method. func (m *MockClient) RebuildNodeBalancerConfig(arg0 context.Context, arg1, arg2 int, arg3 linodego.NodeBalancerConfigRebuildOptions) (*linodego.NodeBalancerConfig, error) { m.ctrl.T.Helper() @@ -301,6 +405,20 @@ func (mr *MockClientMockRecorder) RebuildNodeBalancerConfig(arg0, arg1, arg2, ar return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RebuildNodeBalancerConfig", reflect.TypeOf((*MockClient)(nil).RebuildNodeBalancerConfig), arg0, arg1, arg2, arg3) } +// ShareIPAddresses mocks base method. 
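+// Tests typically arm this mock through the recorder, e.g.
+// mockClient.EXPECT().ShareIPAddresses(gomock.Any(), gomock.Any()).Return(nil).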
+func (m *MockClient) ShareIPAddresses(arg0 context.Context, arg1 linodego.IPAddressesShareOptions) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ShareIPAddresses", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// ShareIPAddresses indicates an expected call of ShareIPAddresses. +func (mr *MockClientMockRecorder) ShareIPAddresses(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ShareIPAddresses", reflect.TypeOf((*MockClient)(nil).ShareIPAddresses), arg0, arg1) +} + // UpdateFirewallRules mocks base method. func (m *MockClient) UpdateFirewallRules(arg0 context.Context, arg1 int, arg2 linodego.FirewallRuleSet) (*linodego.FirewallRuleSet, error) { m.ctrl.T.Helper() @@ -316,6 +434,21 @@ func (mr *MockClientMockRecorder) UpdateFirewallRules(arg0, arg1, arg2 interface return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateFirewallRules", reflect.TypeOf((*MockClient)(nil).UpdateFirewallRules), arg0, arg1, arg2) } +// UpdateInstanceConfigInterface mocks base method. +func (m *MockClient) UpdateInstanceConfigInterface(arg0 context.Context, arg1, arg2, arg3 int, arg4 linodego.InstanceConfigInterfaceUpdateOptions) (*linodego.InstanceConfigInterface, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UpdateInstanceConfigInterface", arg0, arg1, arg2, arg3, arg4) + ret0, _ := ret[0].(*linodego.InstanceConfigInterface) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// UpdateInstanceConfigInterface indicates an expected call of UpdateInstanceConfigInterface. +func (mr *MockClientMockRecorder) UpdateInstanceConfigInterface(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateInstanceConfigInterface", reflect.TypeOf((*MockClient)(nil).UpdateInstanceConfigInterface), arg0, arg1, arg2, arg3, arg4) +} + // UpdateNodeBalancer mocks base method. func (m *MockClient) UpdateNodeBalancer(arg0 context.Context, arg1 int, arg2 linodego.NodeBalancerUpdateOptions) (*linodego.NodeBalancer, error) { m.ctrl.T.Helper() diff --git a/cloud/linode/cloud.go b/cloud/linode/cloud.go index 885b80a6..8ed3a18e 100644 --- a/cloud/linode/cloud.go +++ b/cloud/linode/cloud.go @@ -1,39 +1,66 @@ package linode import ( + "context" "fmt" "io" + "net" "os" + "strconv" + "time" - "github.com/linode/linodego" "github.com/spf13/pflag" + "golang.org/x/exp/slices" "k8s.io/client-go/informers" cloudprovider "k8s.io/cloud-provider" + "k8s.io/klog/v2" + + "github.com/linode/linode-cloud-controller-manager/cloud/linode/client" ) const ( // The name of this cloudprovider - ProviderName = "linode" - accessTokenEnv = "LINODE_API_TOKEN" - regionEnv = "LINODE_REGION" - urlEnv = "LINODE_URL" + ProviderName = "linode" + accessTokenEnv = "LINODE_API_TOKEN" + regionEnv = "LINODE_REGION" + ciliumLBType = "cilium-bgp" + nodeBalancerLBType = "nodebalancer" + tokenHealthCheckPeriod = 5 * time.Minute ) +var supportedLoadBalancerTypes = []string{ciliumLBType, nodeBalancerLBType} + // Options is a configuration object for this cloudprovider implementation. 
// We expect it to be initialized with flags external to this package, likely in
// main.go
 var Options struct {
-	KubeconfigFlag *pflag.Flag
-	LinodeGoDebug  bool
+	KubeconfigFlag           *pflag.Flag
+	LinodeGoDebug            bool
+	EnableRouteController    bool
+	EnableTokenHealthChecker bool
+	// Deprecated: use VPCNames instead
+	VPCName               string
+	VPCNames              string
+	LoadBalancerType      string
+	BGPNodeSelector       string
+	IpHolderSuffix        string
+	LinodeExternalNetwork *net.IPNet
+	NodeBalancerTags      []string
+	GlobalStopChannel     chan<- struct{}
 }

 type linodeCloud struct {
-	client        Client
-	instances     cloudprovider.InstancesV2
-	loadbalancers cloudprovider.LoadBalancer
+	client                   client.Client
+	instances                cloudprovider.InstancesV2
+	loadbalancers            cloudprovider.LoadBalancer
+	routes                   cloudprovider.Routes
+	linodeTokenHealthChecker *healthChecker
 }

+var instanceCache *instances
+
 func init() {
+	registerMetrics()
 	cloudprovider.RegisterCloudProvider(
 		ProviderName,
 		func(io.Reader) (cloudprovider.Interface, error) {
@@ -41,36 +68,103 @@ func init() {
 		})
 }

+// newLinodeClientWithPrometheus creates a new client, kept in its own local
+// scope, and returns an instrumented wrapper that should be used and passed
+// around in its place
+func newLinodeClientWithPrometheus(apiToken string, timeout time.Duration) (client.Client, error) {
+	linodeClient, err := client.New(apiToken, timeout)
+	if err != nil {
+		return nil, fmt.Errorf("client was not created successfully: %w", err)
+	}
+
+	if Options.LinodeGoDebug {
+		linodeClient.SetDebug(true)
+	}
+
+	return client.NewClientWithPrometheus(linodeClient), nil
+}
+
 func newCloud() (cloudprovider.Interface, error) {
+	region := os.Getenv(regionEnv)
+	if region == "" {
+		return nil, fmt.Errorf("%s must be set in the environment (use a k8s secret)", regionEnv)
+	}
+
 	// Read environment variables (from secrets)
 	apiToken := os.Getenv(accessTokenEnv)
 	if apiToken == "" {
 		return nil, fmt.Errorf("%s must be set in the environment (use a k8s secret)", accessTokenEnv)
 	}

-	region := os.Getenv(regionEnv)
-	if region == "" {
-		return nil, fmt.Errorf("%s must be set in the environment (use a k8s secret)", regionEnv)
+	// set timeout used by linodeclient for API calls
+	timeout := client.DefaultClientTimeout
+	if raw, ok := os.LookupEnv("LINODE_REQUEST_TIMEOUT_SECONDS"); ok {
+		if t, err := strconv.Atoi(raw); err == nil && t > 0 {
+			timeout = time.Duration(t) * time.Second
+		}
 	}

-	url := os.Getenv(urlEnv)
-	ua := fmt.Sprintf("linode-cloud-controller-manager %s", linodego.DefaultUserAgent)
+	linodeClient, err := newLinodeClientWithPrometheus(apiToken, timeout)
+	if err != nil {
+		return nil, err
+	}
+
+	var healthChecker *healthChecker
+
+	if Options.EnableTokenHealthChecker {
+		authenticated, err := client.CheckClientAuthenticated(context.TODO(), linodeClient)
+		if err != nil {
+			return nil, fmt.Errorf("linode client authenticated connection error: %w", err)
+		}
+
+		if !authenticated {
+			return nil, fmt.Errorf("linode api token %q is invalid", accessTokenEnv)
+		}
+
+		healthChecker = newHealthChecker(linodeClient, tokenHealthCheckPeriod, Options.GlobalStopChannel)
+	}

-	linodeClient, err := newLinodeClient(apiToken, ua, url)
+	if Options.VPCName != "" && Options.VPCNames != "" {
+		return nil, fmt.Errorf("cannot have both vpc-name and vpc-names set")
+	}
+
+	if Options.VPCName != "" {
+		klog.Warningf("vpc-name flag is deprecated. 
Use vpc-names instead") + Options.VPCNames = Options.VPCName + } + + instanceCache = newInstances(linodeClient) + routes, err := newRoutes(linodeClient, instanceCache) if err != nil { - return nil, fmt.Errorf("client was not created succesfully: %w", err) + return nil, fmt.Errorf("routes client was not created successfully: %w", err) } - if Options.LinodeGoDebug { - linodeClient.SetDebug(true) + if Options.LoadBalancerType != "" && !slices.Contains(supportedLoadBalancerTypes, Options.LoadBalancerType) { + return nil, fmt.Errorf( + "unsupported default load-balancer type %s. Options are %v", + Options.LoadBalancerType, + supportedLoadBalancerTypes, + ) + } + + if Options.IpHolderSuffix != "" { + klog.Infof("Using IP holder suffix '%s'\n", Options.IpHolderSuffix) + } + + if len(Options.IpHolderSuffix) > 23 { + msg := fmt.Sprintf("ip-holder-suffix must be 23 characters or less: %s is %d characters\n", Options.IpHolderSuffix, len(Options.IpHolderSuffix)) + klog.Error(msg) + return nil, fmt.Errorf("%s", msg) } - // Return struct that satisfies cloudprovider.Interface - return &linodeCloud{ - client: linodeClient, - instances: newInstances(linodeClient), - loadbalancers: newLoadbalancers(linodeClient, region), - }, nil + // create struct that satisfies cloudprovider.Interface + lcloud := &linodeCloud{ + client: linodeClient, + instances: instanceCache, + loadbalancers: newLoadbalancers(linodeClient, region), + routes: routes, + linodeTokenHealthChecker: healthChecker, + } + return lcloud, nil } func (c *linodeCloud) Initialize(clientBuilder cloudprovider.ControllerClientBuilder, stopCh <-chan struct{}) { @@ -79,10 +173,14 @@ func (c *linodeCloud) Initialize(clientBuilder cloudprovider.ControllerClientBui serviceInformer := sharedInformer.Core().V1().Services() nodeInformer := sharedInformer.Core().V1().Nodes() + if c.linodeTokenHealthChecker != nil { + go c.linodeTokenHealthChecker.Run(stopCh) + } + serviceController := newServiceController(c.loadbalancers.(*loadbalancers), serviceInformer) go serviceController.Run(stopCh) - nodeController := newNodeController(kubeclient, c.client, nodeInformer) + nodeController := newNodeController(kubeclient, c.client, nodeInformer, instanceCache) go nodeController.Run(stopCh) } @@ -107,6 +205,9 @@ func (c *linodeCloud) Clusters() (cloudprovider.Clusters, bool) { } func (c *linodeCloud) Routes() (cloudprovider.Routes, bool) { + if Options.EnableRouteController { + return c.routes, true + } return nil, false } diff --git a/cloud/linode/cloud_test.go b/cloud/linode/cloud_test.go new file mode 100644 index 00000000..c6f2c97d --- /dev/null +++ b/cloud/linode/cloud_test.go @@ -0,0 +1,530 @@ +package linode + +import ( + "reflect" + "strings" + "testing" + + "github.com/golang/mock/gomock" + "github.com/linode/linode-cloud-controller-manager/cloud/linode/client/mocks" + "github.com/stretchr/testify/assert" + cloudprovider "k8s.io/cloud-provider" +) + +func TestNewCloudRouteControllerDisabled(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + t.Setenv("LINODE_API_TOKEN", "dummyapitoken") + t.Setenv("LINODE_REGION", "us-east") + t.Setenv("LINODE_REQUEST_TIMEOUT_SECONDS", "10") + + t.Run("should not fail if vpc is empty and routecontroller is disabled", func(t *testing.T) { + Options.VPCName = "" + Options.EnableRouteController = false + _, err := newCloud() + assert.NoError(t, err) + }) + + t.Run("fail if vpcname is empty and routecontroller is enabled", func(t *testing.T) { + Options.VPCName = "" + Options.EnableRouteController = true + _, 
err := newCloud() + assert.Error(t, err) + }) +} + +func TestNewCloud(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + t.Setenv("LINODE_API_TOKEN", "dummyapitoken") + t.Setenv("LINODE_REGION", "us-east") + t.Setenv("LINODE_REQUEST_TIMEOUT_SECONDS", "10") + t.Setenv("LINODE_ROUTES_CACHE_TTL_SECONDS", "60") + Options.LinodeGoDebug = true + + t.Run("should fail if api token is empty", func(t *testing.T) { + t.Setenv("LINODE_API_TOKEN", "") + _, err := newCloud() + assert.Error(t, err, "expected error when api token is empty") + }) + + t.Run("should fail if region is empty", func(t *testing.T) { + t.Setenv("LINODE_REGION", "") + _, err := newCloud() + assert.Error(t, err, "expected error when linode region is empty") + }) + + t.Run("should fail if both vpcname and vpcnames are set", func(t *testing.T) { + Options.VPCName = "tt" + Options.VPCNames = "tt" + defer func() { + Options.VPCName = "" + Options.VPCNames = "" + }() + _, err := newCloud() + assert.Error(t, err, "expected error when both vpcname and vpcnames are set") + }) + + t.Run("should not fail if deprecated vpcname is set", func(t *testing.T) { + Options.VPCName = "tt" + defer func() { + Options.VPCName = "" + Options.VPCNames = "" + }() + _, err := newCloud() + assert.NoError(t, err, "expected no error if deprecated flag vpcname is set") + assert.Equal(t, Options.VPCNames, "tt", "expected vpcnames to be set to vpcname") + }) + + t.Run("should fail if incorrect loadbalancertype is set", func(t *testing.T) { + rtEnabled := Options.EnableRouteController + Options.EnableRouteController = false + Options.LoadBalancerType = "test" + defer func() { + Options.LoadBalancerType = "" + Options.EnableRouteController = rtEnabled + }() + _, err := newCloud() + assert.Error(t, err, "expected error if incorrect loadbalancertype is set") + }) + + t.Run("should fail if ipholdersuffix is longer than 23 chars", func(t *testing.T) { + suffix := Options.IpHolderSuffix + Options.IpHolderSuffix = strings.Repeat("a", 24) + rtEnabled := Options.EnableRouteController + Options.EnableRouteController = false + defer func() { + Options.IpHolderSuffix = suffix + Options.EnableRouteController = rtEnabled + }() + _, err := newCloud() + assert.Error(t, err, "expected error if ipholdersuffix is longer than 23 chars") + }) +} + +func Test_linodeCloud_LoadBalancer(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + client := mocks.NewMockClient(ctrl) + type fields struct { + client *mocks.MockClient + instances cloudprovider.InstancesV2 + loadbalancers cloudprovider.LoadBalancer + routes cloudprovider.Routes + } + tests := []struct { + name string + fields fields + want cloudprovider.LoadBalancer + want1 bool + }{ + { + name: "should return loadbalancer interface", + fields: fields{ + client: client, + instances: newInstances(client), + loadbalancers: newLoadbalancers(client, "us-east"), + routes: nil, + }, + want: newLoadbalancers(client, "us-east"), + want1: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + c := &linodeCloud{ + client: tt.fields.client, + instances: tt.fields.instances, + loadbalancers: tt.fields.loadbalancers, + routes: tt.fields.routes, + } + got, got1 := c.LoadBalancer() + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("linodeCloud.LoadBalancer() got = %v, want %v", got, tt.want) + } + if got1 != tt.want1 { + t.Errorf("linodeCloud.LoadBalancer() got1 = %v, want %v", got1, tt.want1) + } + }) + } +} + +func Test_linodeCloud_InstancesV2(t *testing.T) { + ctrl := 
gomock.NewController(t) + defer ctrl.Finish() + client := mocks.NewMockClient(ctrl) + type fields struct { + client *mocks.MockClient + instances cloudprovider.InstancesV2 + loadbalancers cloudprovider.LoadBalancer + routes cloudprovider.Routes + } + tests := []struct { + name string + fields fields + want cloudprovider.InstancesV2 + want1 bool + }{ + { + name: "should return instances interface", + fields: fields{ + client: client, + instances: newInstances(client), + loadbalancers: newLoadbalancers(client, "us-east"), + routes: nil, + }, + want: newInstances(client), + want1: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + c := &linodeCloud{ + client: tt.fields.client, + instances: tt.fields.instances, + loadbalancers: tt.fields.loadbalancers, + routes: tt.fields.routes, + } + got, got1 := c.InstancesV2() + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("linodeCloud.InstancesV2() got = %v, want %v", got, tt.want) + } + if got1 != tt.want1 { + t.Errorf("linodeCloud.InstancesV2() got1 = %v, want %v", got1, tt.want1) + } + }) + } +} + +func Test_linodeCloud_Instances(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + client := mocks.NewMockClient(ctrl) + type fields struct { + client *mocks.MockClient + instances cloudprovider.InstancesV2 + loadbalancers cloudprovider.LoadBalancer + routes cloudprovider.Routes + } + tests := []struct { + name string + fields fields + want cloudprovider.Instances + want1 bool + }{ + { + name: "should return nil", + fields: fields{ + client: client, + instances: newInstances(client), + loadbalancers: newLoadbalancers(client, "us-east"), + routes: nil, + }, + want: nil, + want1: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + c := &linodeCloud{ + client: tt.fields.client, + instances: tt.fields.instances, + loadbalancers: tt.fields.loadbalancers, + routes: tt.fields.routes, + } + got, got1 := c.Instances() + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("linodeCloud.Instances() got = %v, want %v", got, tt.want) + } + if got1 != tt.want1 { + t.Errorf("linodeCloud.Instances() got1 = %v, want %v", got1, tt.want1) + } + }) + } +} + +func Test_linodeCloud_Zones(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + client := mocks.NewMockClient(ctrl) + type fields struct { + client *mocks.MockClient + instances cloudprovider.InstancesV2 + loadbalancers cloudprovider.LoadBalancer + routes cloudprovider.Routes + } + tests := []struct { + name string + fields fields + want cloudprovider.Zones + want1 bool + }{ + { + name: "should return nil", + fields: fields{ + client: client, + instances: newInstances(client), + loadbalancers: newLoadbalancers(client, "us-east"), + routes: nil, + }, + want: nil, + want1: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + c := &linodeCloud{ + client: tt.fields.client, + instances: tt.fields.instances, + loadbalancers: tt.fields.loadbalancers, + routes: tt.fields.routes, + } + got, got1 := c.Zones() + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("linodeCloud.Zones() got = %v, want %v", got, tt.want) + } + if got1 != tt.want1 { + t.Errorf("linodeCloud.Zones() got1 = %v, want %v", got1, tt.want1) + } + }) + } +} + +func Test_linodeCloud_Clusters(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + client := mocks.NewMockClient(ctrl) + type fields struct { + client *mocks.MockClient + instances cloudprovider.InstancesV2 + loadbalancers cloudprovider.LoadBalancer 
+ routes cloudprovider.Routes + } + tests := []struct { + name string + fields fields + want cloudprovider.Clusters + want1 bool + }{ + { + name: "should return nil", + fields: fields{ + client: client, + instances: newInstances(client), + loadbalancers: newLoadbalancers(client, "us-east"), + routes: nil, + }, + want: nil, + want1: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + c := &linodeCloud{ + client: tt.fields.client, + instances: tt.fields.instances, + loadbalancers: tt.fields.loadbalancers, + routes: tt.fields.routes, + } + got, got1 := c.Clusters() + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("linodeCloud.Clusters() got = %v, want %v", got, tt.want) + } + if got1 != tt.want1 { + t.Errorf("linodeCloud.Clusters() got1 = %v, want %v", got1, tt.want1) + } + }) + } +} + +func Test_linodeCloud_Routes(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + client := mocks.NewMockClient(ctrl) + r := &routes{} + type fields struct { + client *mocks.MockClient + instances cloudprovider.InstancesV2 + loadbalancers cloudprovider.LoadBalancer + routes cloudprovider.Routes + EnableRouteController bool + } + tests := []struct { + name string + fields fields + want cloudprovider.Routes + want1 bool + }{ + { + name: "should return nil", + fields: fields{ + client: client, + instances: newInstances(client), + loadbalancers: newLoadbalancers(client, "us-east"), + routes: r, + EnableRouteController: false, + }, + want: nil, + want1: false, + }, + { + name: "should return routes interface", + fields: fields{ + client: client, + instances: newInstances(client), + loadbalancers: newLoadbalancers(client, "us-east"), + routes: r, + EnableRouteController: true, + }, + want: r, + want1: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + c := &linodeCloud{ + client: tt.fields.client, + instances: tt.fields.instances, + loadbalancers: tt.fields.loadbalancers, + routes: tt.fields.routes, + } + rt := Options.EnableRouteController + defer func() { Options.EnableRouteController = rt }() + Options.EnableRouteController = tt.fields.EnableRouteController + got, got1 := c.Routes() + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("linodeCloud.Routes() got = %v, want %v", got, tt.want) + } + if got1 != tt.want1 { + t.Errorf("linodeCloud.Routes() got1 = %v, want %v", got1, tt.want1) + } + }) + } +} + +func Test_linodeCloud_ProviderName(t *testing.T) { + type fields struct { + client *mocks.MockClient + instances cloudprovider.InstancesV2 + loadbalancers cloudprovider.LoadBalancer + routes cloudprovider.Routes + } + tests := []struct { + name string + fields fields + want string + }{ + { + name: "should return linode", + fields: fields{ + client: nil, + instances: nil, + loadbalancers: nil, + routes: nil, + }, + want: ProviderName, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + c := &linodeCloud{ + client: tt.fields.client, + instances: tt.fields.instances, + loadbalancers: tt.fields.loadbalancers, + routes: tt.fields.routes, + } + if got := c.ProviderName(); got != tt.want { + t.Errorf("linodeCloud.ProviderName() = %v, want %v", got, tt.want) + } + }) + } +} + +func Test_linodeCloud_ScrubDNS(t *testing.T) { + type fields struct { + client *mocks.MockClient + instances cloudprovider.InstancesV2 + loadbalancers cloudprovider.LoadBalancer + routes cloudprovider.Routes + } + type args struct { + in0 []string + in1 []string + } + tests := []struct { + name string + fields fields + args args + 
wantNsOut []string + wantSrchOut []string + }{ + { + name: "should return linode", + fields: fields{ + client: nil, + instances: nil, + loadbalancers: nil, + routes: nil, + }, + wantNsOut: nil, + wantSrchOut: nil, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + c := &linodeCloud{ + client: tt.fields.client, + instances: tt.fields.instances, + loadbalancers: tt.fields.loadbalancers, + routes: tt.fields.routes, + } + gotNsOut, gotSrchOut := c.ScrubDNS(tt.args.in0, tt.args.in1) + if !reflect.DeepEqual(gotNsOut, tt.wantNsOut) { + t.Errorf("linodeCloud.ScrubDNS() gotNsOut = %v, want %v", gotNsOut, tt.wantNsOut) + } + if !reflect.DeepEqual(gotSrchOut, tt.wantSrchOut) { + t.Errorf("linodeCloud.ScrubDNS() gotSrchOut = %v, want %v", gotSrchOut, tt.wantSrchOut) + } + }) + } +} + +func Test_linodeCloud_HasClusterID(t *testing.T) { + type fields struct { + client *mocks.MockClient + instances cloudprovider.InstancesV2 + loadbalancers cloudprovider.LoadBalancer + routes cloudprovider.Routes + } + tests := []struct { + name string + fields fields + want bool + }{ + { + name: "should return true", + fields: fields{ + client: nil, + instances: nil, + loadbalancers: nil, + routes: nil, + }, + want: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + c := &linodeCloud{ + client: tt.fields.client, + instances: tt.fields.instances, + loadbalancers: tt.fields.loadbalancers, + routes: tt.fields.routes, + } + if got := c.HasClusterID(); got != tt.want { + t.Errorf("linodeCloud.HasClusterID() = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/cloud/linode/common.go b/cloud/linode/common.go index a56c1a69..9c4aed9f 100644 --- a/cloud/linode/common.go +++ b/cloud/linode/common.go @@ -2,11 +2,17 @@ package linode import ( "fmt" + "net" "strconv" "strings" + + "github.com/linode/linodego" ) -const providerIDPrefix = "linode://" +const ( + providerIDPrefix = "linode://" + DNS1123LabelMaxLength int = 63 +) type invalidProviderIDError struct { value string @@ -30,3 +36,21 @@ func parseProviderID(providerID string) (int, error) { } return id, nil } + +// IgnoreLinodeAPIError returns the error except matches to status code +func IgnoreLinodeAPIError(err error, code int) error { + apiErr := linodego.Error{Code: code} + if apiErr.Is(err) { + err = nil + } + + return err +} + +func isPrivate(ip *net.IP) bool { + if Options.LinodeExternalNetwork == nil { + return ip.IsPrivate() + } + + return ip.IsPrivate() && !Options.LinodeExternalNetwork.Contains(*ip) +} diff --git a/cloud/linode/common_test.go b/cloud/linode/common_test.go index 94dcb099..cb517640 100644 --- a/cloud/linode/common_test.go +++ b/cloud/linode/common_test.go @@ -1,6 +1,11 @@ package linode -import "testing" +import ( + "errors" + "testing" + + "github.com/linode/linodego" +) func TestParseProviderID(t *testing.T) { for _, tc := range []struct { @@ -46,3 +51,46 @@ func TestParseProviderID(t *testing.T) { }) } } + +func TestIgnoreLinodeAPIError(t *testing.T) { + t.Parallel() + tests := []struct { + name string + err error + code int + shouldFilter bool + }{{ + name: "Not Linode API error", + err: errors.New("foo"), + code: 0, + shouldFilter: false, + }, { + name: "Ignore not found Linode API error", + err: linodego.Error{ + Response: nil, + Code: 400, + Message: "not found", + }, + code: 400, + shouldFilter: true, + }, { + name: "Don't ignore not found Linode API error", + err: linodego.Error{ + Response: nil, + Code: 400, + Message: "not found", + }, + code: 500, + shouldFilter: false, + }} 
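+	// Each case runs IgnoreLinodeAPIError against the configured status
+	// code and checks whether the error was filtered down to nil.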
+	for _, tt := range tests {
+		testcase := tt
+		t.Run(testcase.name, func(t *testing.T) {
+			t.Parallel()
+			err := IgnoreLinodeAPIError(testcase.err, testcase.code)
+			if testcase.shouldFilter && err != nil {
+				t.Errorf("expected error to be filtered out, got %v", err)
+			}
+			if !testcase.shouldFilter && err == nil {
+				t.Error("expected error to be passed through, got nil")
+			}
+		})
+	}
+}
diff --git a/cloud/linode/fake_linode_test.go b/cloud/linode/fake_linode_test.go
index d3a37e61..aeb069d8 100644
--- a/cloud/linode/fake_linode_test.go
+++ b/cloud/linode/fake_linode_test.go
@@ -5,11 +5,10 @@ import (
 	"encoding/json"
 	"fmt"
 	"io"
+	"log"
 	"math/rand"
 	"net"
 	"net/http"
-	"path/filepath"
-	"regexp"
 	"strconv"
 	"strings"
 	"testing"
@@ -28,6 +27,7 @@ type fakeAPI struct {
 	fwd      map[int]map[int]*linodego.FirewallDevice // map of firewallID -> firewallDeviceID:FirewallDevice
 	requests map[fakeRequest]struct{}
+	mux      *http.ServeMux
 }

 type fakeRequest struct {
@@ -37,7 +37,7 @@
 }

 func newFake(t *testing.T) *fakeAPI {
-	return &fakeAPI{
+	fake := &fakeAPI{
 		t:        t,
 		nb:       make(map[string]*linodego.NodeBalancer),
 		nbc:      make(map[string]*linodego.NodeBalancerConfig),
@@ -45,7 +45,10 @@
 		fw:       make(map[int]*linodego.Firewall),
 		fwd:      make(map[int]map[int]*linodego.FirewallDevice),
 		requests: make(map[fakeRequest]struct{}),
+		mux:      http.NewServeMux(),
 	}
+	fake.setupRoutes()
+	return fake
 }

 func (f *fakeAPI) ResetRequests() {
@@ -72,346 +75,203 @@ func (f *fakeAPI) didRequestOccur(method, path, body string) bool {
 	return ok
 }

-func (f *fakeAPI) ServeHTTP(w http.ResponseWriter, r *http.Request) {
-	w.Header().Set("Content-Type", "application/json")
-	urlPath := r.URL.Path
-
-	if !strings.HasPrefix(urlPath, "/"+apiVersion) {
-		http.Error(w, "not found", http.StatusNotFound)
-		return
-	}
-	urlPath = strings.TrimPrefix(urlPath, "/"+apiVersion)
-	f.recordRequest(r, urlPath)
+// paginatedResponse represents a single response from a paginated
+// endpoint.
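+// It mirrors the page/pages/results envelope that Linode API v4 list
+// endpoints wrap around their data, so each fake handler can marshal
+// typed results without a per-type paged-response struct.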
+type paginatedResponse[T any] struct { + Page int `json:"page" url:"page,omitempty"` + Pages int `json:"pages" url:"pages,omitempty"` + Results int `json:"results" url:"results,omitempty"` + Data []T `json:"data"` +} - switch r.Method { - case "GET": - whichAPI := strings.Split(urlPath[1:], "/") - switch whichAPI[0] { - case "nodebalancers": - rx, _ := regexp.Compile("/nodebalancers/[0-9]+/configs/[0-9]+/nodes/[0-9]+") - if rx.MatchString(urlPath) { - id := filepath.Base(urlPath) - nbn, found := f.nbn[id] - if found { - rr, _ := json.Marshal(nbn) - _, _ = w.Write(rr) - - } else { - w.WriteHeader(404) - resp := linodego.APIError{ - Errors: []linodego.APIErrorReason{ - {Reason: "Not Found"}, - }, - } - rr, _ := json.Marshal(resp) - _, _ = w.Write(rr) - } - return +func (f *fakeAPI) setupRoutes() { + f.mux.HandleFunc("GET /v4/nodebalancers", func(w http.ResponseWriter, r *http.Request) { + res := 0 + data := []linodego.NodeBalancer{} + filter := r.Header.Get("X-Filter") + if filter == "" { + for _, n := range f.nb { + data = append(data, *n) + } + } else { + var fs map[string]string + err := json.Unmarshal([]byte(filter), &fs) + if err != nil { + f.t.Fatal(err) } - rx, _ = regexp.Compile("/nodebalancers/[0-9]+/configs/[0-9]+/nodes") - if rx.MatchString(urlPath) { - res := 0 - parts := strings.Split(urlPath[1:], "/") - nbcID, err := strconv.Atoi(parts[3]) - if err != nil { - f.t.Fatal(err) + for _, n := range f.nb { + if (n.Label != nil && fs["label"] != "" && *n.Label == fs["label"]) || + (fs["ipv4"] != "" && n.IPv4 != nil && *n.IPv4 == fs["ipv4"]) { + data = append(data, *n) } + } + } - data := []linodego.NodeBalancerNode{} - - for _, nbn := range f.nbn { - if nbcID == nbn.ConfigID { - data = append(data, *nbn) - } - } + resp := paginatedResponse[linodego.NodeBalancer]{ + Page: 1, + Pages: 1, + Results: res, + Data: data, + } + rr, _ := json.Marshal(resp) + _, _ = w.Write(rr) + }) - resp := linodego.NodeBalancerNodesPagedResponse{ - PageOptions: &linodego.PageOptions{ - Page: 1, - Pages: 1, - Results: res, - }, - Data: data, - } - rr, _ := json.Marshal(resp) - _, _ = w.Write(rr) - return - } - rx, _ = regexp.Compile("/nodebalancers/[0-9]+/configs/[0-9]+") - if rx.MatchString(urlPath) { - id := filepath.Base(urlPath) - nbc, found := f.nbc[id] - if found { - rr, _ := json.Marshal(nbc) - _, _ = w.Write(rr) - - } else { - w.WriteHeader(404) - resp := linodego.APIError{ - Errors: []linodego.APIErrorReason{ - {Reason: "Not Found"}, - }, - } - rr, _ := json.Marshal(resp) - _, _ = w.Write(rr) - } - return - } - rx, _ = regexp.Compile("/nodebalancers/[0-9]+/configs") - if rx.MatchString(urlPath) { - res := 0 - data := []linodego.NodeBalancerConfig{} - filter := r.Header.Get("X-Filter") - if filter == "" { - for _, n := range f.nbc { - data = append(data, *n) - } - } else { - var fs map[string]string - err := json.Unmarshal([]byte(filter), &fs) - if err != nil { - f.t.Fatal(err) - } - for _, n := range f.nbc { - if strconv.Itoa(n.NodeBalancerID) == fs["nodebalancer_id"] { - data = append(data, *n) - } - } - } - resp := linodego.NodeBalancerConfigsPagedResponse{ - PageOptions: &linodego.PageOptions{ - Page: 1, - Pages: 1, - Results: res, - }, - Data: data, - } - rr, err := json.Marshal(resp) - if err != nil { - f.t.Fatal(err) - } - _, _ = w.Write(rr) - return + f.mux.HandleFunc("GET /v4/nodebalancers/{nodeBalancerId}", func(w http.ResponseWriter, r *http.Request) { + nb, found := f.nb[r.PathValue("nodeBalancerId")] + if !found { + w.WriteHeader(404) + resp := linodego.APIError{ + Errors: 
[]linodego.APIErrorReason{ + {Reason: "Not Found"}, + }, } + rr, _ := json.Marshal(resp) + _, _ = w.Write(rr) + return + } - rx = regexp.MustCompile("/nodebalancers/[0-9]+/firewalls") - if rx.MatchString(urlPath) { - id := strings.Split(urlPath, "/")[2] - devID, err := strconv.Atoi(id) - if err != nil { - f.t.Fatal(err) - } + rr, _ := json.Marshal(nb) + _, _ = w.Write(rr) + }) - data := linodego.NodeBalancerFirewallsPagedResponse{ - Data: []linodego.Firewall{}, - PageOptions: &linodego.PageOptions{ - Page: 1, - Pages: 1, - Results: 0, - }, - } + f.mux.HandleFunc("GET /v4/nodebalancers/{nodeBalancerId}/firewalls", func(w http.ResponseWriter, r *http.Request) { + nodebalancerID, err := strconv.Atoi(r.PathValue("nodeBalancerId")) + if err != nil { + f.t.Fatal(err) + } - out: - for fwid, devices := range f.fwd { - for _, device := range devices { - if device.Entity.ID == devID { - data.Data = append(data.Data, *f.fw[fwid]) - data.PageOptions.Results = 1 - break out - } - } - } + data := paginatedResponse[linodego.Firewall]{ + Page: 1, + Pages: 1, + Results: 0, + Data: []linodego.Firewall{}, + } - resp, _ := json.Marshal(data) - _, _ = w.Write(resp) - return + out: + for fwid, devices := range f.fwd { + for _, device := range devices { + if device.Entity.ID == nodebalancerID { + data.Data = append(data.Data, *f.fw[fwid]) + data.Results = 1 + break out + } } + } - rx, _ = regexp.Compile("/nodebalancers/[0-9]+") - if rx.MatchString(urlPath) { - id := filepath.Base(urlPath) - nb, found := f.nb[id] - if found { - rr, _ := json.Marshal(nb) - _, _ = w.Write(rr) - - } else { - w.WriteHeader(404) - resp := linodego.APIError{ - Errors: []linodego.APIErrorReason{ - {Reason: "Not Found"}, - }, - } - rr, _ := json.Marshal(resp) - _, _ = w.Write(rr) - } - return + resp, _ := json.Marshal(data) + _, _ = w.Write(resp) + }) + + // TODO: note that we discard `nodeBalancerId` + f.mux.HandleFunc("GET /v4/nodebalancers/{nodeBalancerId}/configs", func(w http.ResponseWriter, r *http.Request) { + res := 0 + data := []linodego.NodeBalancerConfig{} + filter := r.Header.Get("X-Filter") + if filter == "" { + for _, n := range f.nbc { + data = append(data, *n) + } + } else { + var fs map[string]string + err := json.Unmarshal([]byte(filter), &fs) + if err != nil { + f.t.Fatal(err) } - rx, _ = regexp.Compile("/nodebalancers") - if rx.MatchString(urlPath) { - res := 0 - data := []linodego.NodeBalancer{} - filter := r.Header.Get("X-Filter") - if filter == "" { - for _, n := range f.nb { - data = append(data, *n) - } - } else { - var fs map[string]string - err := json.Unmarshal([]byte(filter), &fs) - if err != nil { - f.t.Fatal(err) - } - for _, n := range f.nb { - if (n.Label != nil && fs["label"] != "" && *n.Label == fs["label"]) || - (fs["ipv4"] != "" && n.IPv4 != nil && *n.IPv4 == fs["ipv4"]) { - data = append(data, *n) - } - } - } - resp := linodego.NodeBalancersPagedResponse{ - PageOptions: &linodego.PageOptions{ - Page: 1, - Pages: 1, - Results: res, - }, - Data: data, + for _, n := range f.nbc { + if strconv.Itoa(n.NodeBalancerID) == fs["nodebalancer_id"] { + data = append(data, *n) } - rr, _ := json.Marshal(resp) - _, _ = w.Write(rr) - return } - case "networking": - rx, _ := regexp.Compile("/networking/firewalls/[0-9]+/devices") - if rx.MatchString(urlPath) { - fwdId, err := strconv.Atoi(strings.Split(urlPath, "/")[3]) - if err != nil { - f.t.Fatal(err) - } + } + resp := paginatedResponse[linodego.NodeBalancerConfig]{ + Page: 1, + Pages: 1, + Results: res, + Data: data, + } + rr, err := json.Marshal(resp) + if err != 
nil { + f.t.Fatal(err) + } + _, _ = w.Write(rr) + }) - firewallDevices, found := f.fwd[fwdId] - if found { - firewallDeviceList := []linodego.FirewallDevice{} - for i := range firewallDevices { - firewallDeviceList = append(firewallDeviceList, *firewallDevices[i]) - } - rr, _ := json.Marshal(linodego.FirewallDevicesPagedResponse{ - PageOptions: &linodego.PageOptions{Page: 1, Pages: 1, Results: len(firewallDeviceList)}, - Data: firewallDeviceList, - }) - _, _ = w.Write(rr) - } else { - w.WriteHeader(404) - resp := linodego.APIError{ - Errors: []linodego.APIErrorReason{ - {Reason: "Not Found"}, - }, - } - rr, _ := json.Marshal(resp) - _, _ = w.Write(rr) - } - return - } + f.mux.HandleFunc("GET /v4/nodebalancers/{nodeBalancerId}/configs/{configId}/nodes", func(w http.ResponseWriter, r *http.Request) { + res := 0 + nbcID, err := strconv.Atoi(r.PathValue("configId")) + if err != nil { + f.t.Fatal(err) } - case "POST": - tp := filepath.Base(urlPath) - if tp == "nodebalancers" { - nbco := linodego.NodeBalancerCreateOptions{} - if err := json.NewDecoder(r.Body).Decode(&nbco); err != nil { - f.t.Fatal(err) - } + data := []linodego.NodeBalancerNode{} - ip := net.IPv4(byte(rand.Intn(100)), byte(rand.Intn(100)), byte(rand.Intn(100)), byte(rand.Intn(100))).String() - hostname := fmt.Sprintf("nb-%s.%s.linode.com", strings.Replace(ip, ".", "-", 4), strings.ToLower(nbco.Region)) - nb := linodego.NodeBalancer{ - ID: rand.Intn(9999), - Label: nbco.Label, - Region: nbco.Region, - IPv4: &ip, - Hostname: &hostname, - Tags: nbco.Tags, + for _, nbn := range f.nbn { + if nbcID == nbn.ConfigID { + data = append(data, *nbn) } + } - if nbco.ClientConnThrottle != nil { - nb.ClientConnThrottle = *nbco.ClientConnThrottle - } - f.nb[strconv.Itoa(nb.ID)] = &nb - - for _, nbcco := range nbco.Configs { - if nbcco.Protocol == "https" { - if !strings.Contains(nbcco.SSLCert, "BEGIN CERTIFICATE") { - f.t.Fatal("HTTPS port declared without calid ssl cert", nbcco.SSLCert) - } - if !strings.Contains(nbcco.SSLKey, "BEGIN RSA PRIVATE KEY") { - f.t.Fatal("HTTPS port declared without calid ssl key", nbcco.SSLKey) - } - } - nbc := linodego.NodeBalancerConfig{ - ID: rand.Intn(9999), - Port: nbcco.Port, - Protocol: nbcco.Protocol, - ProxyProtocol: nbcco.ProxyProtocol, - Algorithm: nbcco.Algorithm, - Stickiness: nbcco.Stickiness, - Check: nbcco.Check, - CheckInterval: nbcco.CheckInterval, - CheckAttempts: nbcco.CheckAttempts, - CheckPath: nbcco.CheckPath, - CheckBody: nbcco.CheckBody, - CheckPassive: *nbcco.CheckPassive, - CheckTimeout: nbcco.CheckTimeout, - CipherSuite: nbcco.CipherSuite, - NodeBalancerID: nb.ID, - SSLCommonName: "sslcommonname", - SSLFingerprint: "sslfingerprint", - SSLCert: "", - SSLKey: "", - } - f.nbc[strconv.Itoa(nbc.ID)] = &nbc - - for _, nbnco := range nbcco.Nodes { - nbn := linodego.NodeBalancerNode{ - ID: rand.Intn(99999), - Address: nbnco.Address, - Label: nbnco.Label, - Weight: nbnco.Weight, - Mode: nbnco.Mode, - NodeBalancerID: nb.ID, - ConfigID: nbc.ID, - } - f.nbn[strconv.Itoa(nbn.ID)] = &nbn - } - } + resp := paginatedResponse[linodego.NodeBalancerNode]{ + Page: 1, + Pages: 1, + Results: res, + Data: data, + } + rr, _ := json.Marshal(resp) + _, _ = w.Write(rr) + }) - if nbco.FirewallID != 0 { - createFirewallDevice(nbco.FirewallID, f, linodego.FirewallDeviceCreateOptions{ - ID: nb.ID, - Type: "nodebalancer", - }) - } + f.mux.HandleFunc("GET /v4/networking/firewalls/{firewallId}/devices", func(w http.ResponseWriter, r *http.Request) { + fwdId, err := strconv.Atoi(r.PathValue("firewallId")) + if err != nil 
{ + f.t.Fatal(err) + } - resp, err := json.Marshal(nb) - if err != nil { - f.t.Fatal(err) + firewallDevices, found := f.fwd[fwdId] + if !found { + w.WriteHeader(404) + resp := linodego.APIError{ + Errors: []linodego.APIErrorReason{ + {Reason: "Not Found"}, + }, } - _, _ = w.Write(resp) + rr, _ := json.Marshal(resp) + _, _ = w.Write(rr) return + } - } else if tp == "rebuild" { - parts := strings.Split(urlPath[1:], "/") - nbcco := new(linodego.NodeBalancerConfigRebuildOptions) - if err := json.NewDecoder(r.Body).Decode(nbcco); err != nil { - f.t.Fatal(err) - } - nbid, err := strconv.Atoi(parts[1]) - if err != nil { - f.t.Fatal(err) - } - nbcid, err := strconv.Atoi(parts[3]) - if err != nil { - f.t.Fatal(err) - } + firewallDeviceList := []linodego.FirewallDevice{} + for i := range firewallDevices { + firewallDeviceList = append(firewallDeviceList, *firewallDevices[i]) + } + rr, _ := json.Marshal(paginatedResponse[linodego.FirewallDevice]{Page: 1, Pages: 1, Results: len(firewallDeviceList), Data: firewallDeviceList}) + _, _ = w.Write(rr) + }) + + f.mux.HandleFunc("POST /v4/nodebalancers", func(w http.ResponseWriter, r *http.Request) { + nbco := linodego.NodeBalancerCreateOptions{} + if err := json.NewDecoder(r.Body).Decode(&nbco); err != nil { + f.t.Fatal(err) + } + + ip := net.IPv4(byte(rand.Intn(100)), byte(rand.Intn(100)), byte(rand.Intn(100)), byte(rand.Intn(100))).String() + hostname := fmt.Sprintf("nb-%s.%s.linode.com", strings.Replace(ip, ".", "-", 4), strings.ToLower(nbco.Region)) + nb := linodego.NodeBalancer{ + ID: rand.Intn(9999), + Label: nbco.Label, + Region: nbco.Region, + IPv4: &ip, + Hostname: &hostname, + Tags: nbco.Tags, + } + + if nbco.ClientConnThrottle != nil { + nb.ClientConnThrottle = *nbco.ClientConnThrottle + } + f.nb[strconv.Itoa(nb.ID)] = &nb + + for _, nbcco := range nbco.Configs { if nbcco.Protocol == "https" { if !strings.Contains(nbcco.SSLCert, "BEGIN CERTIFICATE") { f.t.Fatal("HTTPS port declared without calid ssl cert", nbcco.SSLCert) @@ -420,8 +280,8 @@ func (f *fakeAPI) ServeHTTP(w http.ResponseWriter, r *http.Request) { f.t.Fatal("HTTPS port declared without calid ssl key", nbcco.SSLKey) } } - nbcc := linodego.NodeBalancerConfig{ - ID: nbcid, + nbc := linodego.NodeBalancerConfig{ + ID: rand.Intn(9999), Port: nbcco.Port, Protocol: nbcco.Protocol, ProxyProtocol: nbcco.ProxyProtocol, @@ -435,336 +295,395 @@ func (f *fakeAPI) ServeHTTP(w http.ResponseWriter, r *http.Request) { CheckPassive: *nbcco.CheckPassive, CheckTimeout: nbcco.CheckTimeout, CipherSuite: nbcco.CipherSuite, - NodeBalancerID: nbid, + NodeBalancerID: nb.ID, SSLCommonName: "sslcommonname", SSLFingerprint: "sslfingerprint", SSLCert: "", SSLKey: "", } + f.nbc[strconv.Itoa(nbc.ID)] = &nbc - f.nbc[strconv.Itoa(nbcc.ID)] = &nbcc - for k, n := range f.nbn { - if n.ConfigID == nbcc.ID { - delete(f.nbn, k) - } - } - - for _, n := range nbcco.Nodes { - node := linodego.NodeBalancerNode{ + for _, nbnco := range nbcco.Nodes { + nbn := linodego.NodeBalancerNode{ ID: rand.Intn(99999), - Address: n.Address, - Label: n.Label, - Weight: n.Weight, - Mode: n.Mode, - NodeBalancerID: nbid, - ConfigID: nbcc.ID, + Address: nbnco.Address, + Label: nbnco.Label, + Weight: nbnco.Weight, + Mode: nbnco.Mode, + NodeBalancerID: nb.ID, + ConfigID: nbc.ID, } - f.nbn[strconv.Itoa(node.ID)] = &node - } - resp, err := json.Marshal(nbcc) - if err != nil { - f.t.Fatal(err) - } - _, _ = w.Write(resp) - return - } else if tp == "configs" { - parts := strings.Split(urlPath[1:], "/") - nbcco := 
new(linodego.NodeBalancerConfigCreateOptions)
-			if err := json.NewDecoder(r.Body).Decode(nbcco); err != nil {
-				f.t.Fatal(err)
-			}
-			nbid, err := strconv.Atoi(parts[1])
-			if err != nil {
-				f.t.Fatal(err)
+			}
-			nbcc := linodego.NodeBalancerConfig{
-				ID:             rand.Intn(9999),
-				Port:           nbcco.Port,
-				Protocol:       nbcco.Protocol,
-				ProxyProtocol:  nbcco.ProxyProtocol,
-				Algorithm:      nbcco.Algorithm,
-				Stickiness:     nbcco.Stickiness,
-				Check:          nbcco.Check,
-				CheckInterval:  nbcco.CheckInterval,
-				CheckAttempts:  nbcco.CheckAttempts,
-				CheckPath:      nbcco.CheckPath,
-				CheckBody:      nbcco.CheckBody,
-				CheckPassive:   *nbcco.CheckPassive,
-				CheckTimeout:   nbcco.CheckTimeout,
-				CipherSuite:    nbcco.CipherSuite,
-				NodeBalancerID: nbid,
-				SSLCommonName:  "sslcomonname",
-				SSLFingerprint: "sslfingerprint",
-				SSLCert:        "",
-				SSLKey:         "",
-			}
-			f.nbc[strconv.Itoa(nbcc.ID)] = &nbcc
+		if nbco.FirewallID != 0 {
+			createFirewallDevice(nbco.FirewallID, f, linodego.FirewallDeviceCreateOptions{
+				ID:   nb.ID,
+				Type: "nodebalancer",
+			})
+		}
-			resp, err := json.Marshal(nbcc)
-			if err != nil {
-				f.t.Fatal(err)
-			}
-			_, _ = w.Write(resp)
-			return
-		} else if tp == "nodes" {
-			parts := strings.Split(urlPath[1:], "/")
-			nbnco := new(linodego.NodeBalancerNodeCreateOptions)
-			if err := json.NewDecoder(r.Body).Decode(nbnco); err != nil {
-				f.t.Fatal(err)
+		resp, err := json.Marshal(nb)
+		if err != nil {
+			f.t.Fatal(err)
+		}
+		_, _ = w.Write(resp)
+	})
+
+	f.mux.HandleFunc("POST /v4/nodebalancers/{nodeBalancerId}/configs", func(w http.ResponseWriter, r *http.Request) {
+		nbcco := new(linodego.NodeBalancerConfigCreateOptions)
+		if err := json.NewDecoder(r.Body).Decode(nbcco); err != nil {
+			f.t.Fatal(err)
+		}
+		nbid, err := strconv.Atoi(r.PathValue("nodeBalancerId"))
+		if err != nil {
+			f.t.Fatal(err)
+		}
+
+		nbcc := linodego.NodeBalancerConfig{
+			ID:             rand.Intn(9999),
+			Port:           nbcco.Port,
+			Protocol:       nbcco.Protocol,
+			ProxyProtocol:  nbcco.ProxyProtocol,
+			Algorithm:      nbcco.Algorithm,
+			Stickiness:     nbcco.Stickiness,
+			Check:          nbcco.Check,
+			CheckInterval:  nbcco.CheckInterval,
+			CheckAttempts:  nbcco.CheckAttempts,
+			CheckPath:      nbcco.CheckPath,
+			CheckBody:      nbcco.CheckBody,
+			CheckPassive:   *nbcco.CheckPassive,
+			CheckTimeout:   nbcco.CheckTimeout,
+			CipherSuite:    nbcco.CipherSuite,
+			NodeBalancerID: nbid,
+			SSLCommonName:  "sslcommonname",
+			SSLFingerprint: "sslfingerprint",
+			SSLCert:        "",
+			SSLKey:         "",
+		}
+		f.nbc[strconv.Itoa(nbcc.ID)] = &nbcc
+
+		resp, err := json.Marshal(nbcc)
+		if err != nil {
+			f.t.Fatal(err)
+		}
+		_, _ = w.Write(resp)
+	})
+
+	f.mux.HandleFunc("POST /v4/nodebalancers/{nodeBalancerId}/configs/{configId}/rebuild", func(w http.ResponseWriter, r *http.Request) {
+		nbcco := new(linodego.NodeBalancerConfigRebuildOptions)
+		if err := json.NewDecoder(r.Body).Decode(nbcco); err != nil {
+			f.t.Fatal(err)
+		}
+		nbid, err := strconv.Atoi(r.PathValue("nodeBalancerId"))
+		if err != nil {
+			f.t.Fatal(err)
+		}
+		nbcid, err := strconv.Atoi(r.PathValue("configId"))
+		if err != nil {
+			f.t.Fatal(err)
+		}
+		if nbcco.Protocol == "https" {
+			if !strings.Contains(nbcco.SSLCert, "BEGIN CERTIFICATE") {
+				f.t.Fatal("HTTPS port declared without valid ssl cert", nbcco.SSLCert)
 			}
-			nbid, err := strconv.Atoi(parts[1])
-			if err != nil {
-				f.t.Fatal(err)
+			if !strings.Contains(nbcco.SSLKey, "BEGIN RSA PRIVATE KEY") {
+				f.t.Fatal("HTTPS port declared without valid ssl key", nbcco.SSLKey)
 			}
-			nbcid, err := strconv.Atoi(parts[3])
-			if err != nil {
-				f.t.Fatal(err)
+		}
+		nbcc := linodego.NodeBalancerConfig{
+			ID:   nbcid,
+			Port: 
nbcco.Port, + Protocol: nbcco.Protocol, + ProxyProtocol: nbcco.ProxyProtocol, + Algorithm: nbcco.Algorithm, + Stickiness: nbcco.Stickiness, + Check: nbcco.Check, + CheckInterval: nbcco.CheckInterval, + CheckAttempts: nbcco.CheckAttempts, + CheckPath: nbcco.CheckPath, + CheckBody: nbcco.CheckBody, + CheckPassive: *nbcco.CheckPassive, + CheckTimeout: nbcco.CheckTimeout, + CipherSuite: nbcco.CipherSuite, + NodeBalancerID: nbid, + SSLCommonName: "sslcommonname", + SSLFingerprint: "sslfingerprint", + SSLCert: "", + SSLKey: "", + } + + f.nbc[strconv.Itoa(nbcc.ID)] = &nbcc + for k, n := range f.nbn { + if n.ConfigID == nbcc.ID { + delete(f.nbn, k) } - nbn := linodego.NodeBalancerNode{ + } + + for _, n := range nbcco.Nodes { + node := linodego.NodeBalancerNode{ ID: rand.Intn(99999), - Address: nbnco.Address, - Label: nbnco.Label, - Status: "UP", - Weight: nbnco.Weight, - Mode: nbnco.Mode, - ConfigID: nbcid, + Address: n.Address, + Label: n.Label, + Weight: n.Weight, + Mode: n.Mode, NodeBalancerID: nbid, + ConfigID: nbcc.ID, } - f.nbn[strconv.Itoa(nbn.ID)] = &nbn - resp, err := json.Marshal(nbn) - if err != nil { - f.t.Fatal(err) - } - _, _ = w.Write(resp) - return - } else if tp == "firewalls" { - fco := linodego.FirewallCreateOptions{} - if err := json.NewDecoder(r.Body).Decode(&fco); err != nil { - f.t.Fatal(err) - } + f.nbn[strconv.Itoa(node.ID)] = &node + } + resp, err := json.Marshal(nbcc) + if err != nil { + f.t.Fatal(err) + } + _, _ = w.Write(resp) + }) - firewall := linodego.Firewall{ - ID: rand.Intn(9999), - Label: fco.Label, - Rules: fco.Rules, - Tags: fco.Tags, - Status: "enabled", - } + f.mux.HandleFunc("POST /v4/networking/firewalls", func(w http.ResponseWriter, r *http.Request) { + fco := linodego.FirewallCreateOptions{} + if err := json.NewDecoder(r.Body).Decode(&fco); err != nil { + f.t.Fatal(err) + } - f.fw[firewall.ID] = &firewall - resp, err := json.Marshal(firewall) - if err != nil { - f.t.Fatal(err) - } - _, _ = w.Write(resp) - return - } else if tp == "devices" { - fwId := strings.Split(urlPath, "/")[3] - fdco := linodego.FirewallDeviceCreateOptions{} - if err := json.NewDecoder(r.Body).Decode(&fdco); err != nil { - f.t.Fatal(err) - } + firewall := linodego.Firewall{ + ID: rand.Intn(9999), + Label: fco.Label, + Rules: fco.Rules, + Tags: fco.Tags, + Status: "enabled", + } - firewallID, err := strconv.Atoi(fwId) - if err != nil { - f.t.Fatal(err) - } + f.fw[firewall.ID] = &firewall + resp, err := json.Marshal(firewall) + if err != nil { + f.t.Fatal(err) + } + _, _ = w.Write(resp) + }) - fwd := createFirewallDevice(firewallID, f, fdco) - resp, err := json.Marshal(fwd) - if err != nil { - f.t.Fatal(err) - } - _, _ = w.Write(resp) - return + f.mux.HandleFunc("POST /v4/networking/firewalls/{firewallId}/devices", func(w http.ResponseWriter, r *http.Request) { + fdco := linodego.FirewallDeviceCreateOptions{} + if err := json.NewDecoder(r.Body).Decode(&fdco); err != nil { + f.t.Fatal(err) } - case "DELETE": - idRaw := filepath.Base(urlPath) - id, err := strconv.Atoi(idRaw) + + firewallID, err := strconv.Atoi(r.PathValue("firewallId")) if err != nil { f.t.Fatal(err) } - if strings.Contains(urlPath, "nodes") { - delete(f.nbn, idRaw) - } else if strings.Contains(urlPath, "configs") { - delete(f.nbc, idRaw) - for k, n := range f.nbn { - if n.ConfigID == id { - delete(f.nbn, k) - } - } - } else if strings.Contains(urlPath, "nodebalancers") { - delete(f.nb, idRaw) + fwd := createFirewallDevice(firewallID, f, fdco) + resp, err := json.Marshal(fwd) + if err != nil { + f.t.Fatal(err) + } + _, 
_ = w.Write(resp) + }) - for k, c := range f.nbc { - if c.NodeBalancerID == id { - delete(f.nbc, k) - } - } + f.mux.HandleFunc("DELETE /v4/nodebalancers/{nodeBalancerId}", func(w http.ResponseWriter, r *http.Request) { + delete(f.nb, r.PathValue("nodeBalancerId")) + nid, err := strconv.Atoi(r.PathValue("nodeBalancerId")) + if err != nil { + f.t.Fatal(err) + } - for k, n := range f.nbn { - if n.NodeBalancerID == id { - delete(f.nbn, k) - } - } - } else if strings.Contains(urlPath, "devices") { - firewallId, err := strconv.Atoi(strings.Split(urlPath, "/")[3]) - if err != nil { - f.t.Fatal(err) + for k, c := range f.nbc { + if c.NodeBalancerID == nid { + delete(f.nbc, k) } + } - deviceId, err := strconv.Atoi(strings.Split(urlPath, "/")[5]) - if err != nil { - f.t.Fatal(err) - } - delete(f.fwd[firewallId], deviceId) - } else if strings.Contains(urlPath, "firewalls") { - firewallId, err := strconv.Atoi(strings.Split(urlPath, "/")[3]) - if err != nil { - f.t.Fatal(err) + for k, n := range f.nbn { + if n.NodeBalancerID == nid { + delete(f.nbn, k) } + } + }) + + f.mux.HandleFunc("DELETE /v4/nodebalancers/{nodeBalancerId}/configs/{configId}/nodes/{nodeId}", func(w http.ResponseWriter, r *http.Request) { + delete(f.nbn, r.PathValue("nodeId")) + }) + + f.mux.HandleFunc("DELETE /v4/nodebalancers/{nodeBalancerId}/configs/{configId}", func(w http.ResponseWriter, r *http.Request) { + delete(f.nbc, r.PathValue("configId")) - delete(f.fwd, firewallId) - delete(f.fw, firewallId) + cid, err := strconv.Atoi(r.PathValue("configId")) + if err != nil { + f.t.Fatal(err) } - case "PUT": - if strings.Contains(urlPath, "nodes") { - f.t.Fatal("PUT ...nodes is not supported by the mock API") - } else if strings.Contains(urlPath, "configs") { - parts := strings.Split(urlPath[1:], "/") - nbcco := new(linodego.NodeBalancerConfigUpdateOptions) - if err := json.NewDecoder(r.Body).Decode(nbcco); err != nil { - f.t.Fatal(err) - } - nbcid, err := strconv.Atoi(parts[3]) - if err != nil { - f.t.Fatal(err) - } - nbid, err := strconv.Atoi(parts[1]) - if err != nil { - f.t.Fatal(err) + + for k, n := range f.nbn { + if n.ConfigID == cid { + delete(f.nbn, k) } + } + }) - nbcc := linodego.NodeBalancerConfig{ - ID: nbcid, - Port: nbcco.Port, - Protocol: nbcco.Protocol, - ProxyProtocol: nbcco.ProxyProtocol, - Algorithm: nbcco.Algorithm, - Stickiness: nbcco.Stickiness, - Check: nbcco.Check, - CheckInterval: nbcco.CheckInterval, - CheckAttempts: nbcco.CheckAttempts, - CheckPath: nbcco.CheckPath, - CheckBody: nbcco.CheckBody, - CheckPassive: *nbcco.CheckPassive, - CheckTimeout: nbcco.CheckTimeout, - CipherSuite: nbcco.CipherSuite, + f.mux.HandleFunc("DELETE /v4/networking/firewalls/{firewallId}", func(w http.ResponseWriter, r *http.Request) { + firewallId, err := strconv.Atoi(r.PathValue("firewallId")) + if err != nil { + f.t.Fatal(err) + } + + delete(f.fwd, firewallId) + delete(f.fw, firewallId) + }) + + f.mux.HandleFunc("DELETE /v4/networking/firewalls/{firewallId}/devices/{deviceId}", func(w http.ResponseWriter, r *http.Request) { + firewallId, err := strconv.Atoi(r.PathValue("firewallId")) + if err != nil { + f.t.Fatal(err) + } + + deviceId, err := strconv.Atoi(r.PathValue("deviceId")) + if err != nil { + f.t.Fatal(err) + } + delete(f.fwd[firewallId], deviceId) + }) + + // TODO: reimplement all of this + f.mux.HandleFunc("PUT /v4/nodebalancers/{nodeBalancerId}/configs/{configId}", func(w http.ResponseWriter, r *http.Request) { + nbcco := new(linodego.NodeBalancerConfigUpdateOptions) + if err := json.NewDecoder(r.Body).Decode(nbcco); 
err != nil { + f.t.Fatal(err) + } + nbcid, err := strconv.Atoi(r.PathValue("configId")) + if err != nil { + f.t.Fatal(err) + } + nbid, err := strconv.Atoi(r.PathValue("nodeBalancerId")) + if err != nil { + f.t.Fatal(err) + } + + nbcc := linodego.NodeBalancerConfig{ + ID: nbcid, + Port: nbcco.Port, + Protocol: nbcco.Protocol, + ProxyProtocol: nbcco.ProxyProtocol, + Algorithm: nbcco.Algorithm, + Stickiness: nbcco.Stickiness, + Check: nbcco.Check, + CheckInterval: nbcco.CheckInterval, + CheckAttempts: nbcco.CheckAttempts, + CheckPath: nbcco.CheckPath, + CheckBody: nbcco.CheckBody, + CheckPassive: *nbcco.CheckPassive, + CheckTimeout: nbcco.CheckTimeout, + CipherSuite: nbcco.CipherSuite, + NodeBalancerID: nbid, + SSLCommonName: "sslcommonname", + SSLFingerprint: "sslfingerprint", + SSLCert: "", + SSLKey: "", + } + f.nbc[strconv.Itoa(nbcc.ID)] = &nbcc + + for _, n := range nbcco.Nodes { + node := linodego.NodeBalancerNode{ + ID: rand.Intn(99999), + Address: n.Address, + Label: n.Label, + Weight: n.Weight, + Mode: n.Mode, NodeBalancerID: nbid, - SSLCommonName: "sslcommonname", - SSLFingerprint: "sslfingerprint", - SSLCert: "", - SSLKey: "", + ConfigID: nbcc.ID, } - f.nbc[strconv.Itoa(nbcc.ID)] = &nbcc - for _, n := range nbcco.Nodes { - node := linodego.NodeBalancerNode{ - ID: rand.Intn(99999), - Address: n.Address, - Label: n.Label, - Weight: n.Weight, - Mode: n.Mode, - NodeBalancerID: nbid, - ConfigID: nbcc.ID, - } + f.nbn[strconv.Itoa(node.ID)] = &node + } - f.nbn[strconv.Itoa(node.ID)] = &node - } + resp, err := json.Marshal(nbcc) + if err != nil { + f.t.Fatal(err) + } + _, _ = w.Write(resp) + }) - resp, err := json.Marshal(nbcc) + f.mux.HandleFunc("PUT /v4/networking/firewalls/{firewallID}/rules", func(w http.ResponseWriter, r *http.Request) { + fwrs := new(linodego.FirewallRuleSet) + if err := json.NewDecoder(r.Body).Decode(fwrs); err != nil { + f.t.Fatal(err) + } + + fwID, err := strconv.Atoi(r.PathValue("firewallID")) + if err != nil { + f.t.Fatal(err) + } + + if firewall, found := f.fw[fwID]; found { + firewall.Rules.Inbound = fwrs.Inbound + firewall.Rules.InboundPolicy = fwrs.InboundPolicy + // outbound rules do not apply, ignoring. 
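+			// (NodeBalancer traffic is only filtered inbound by the CCM, which is
+			// why the outbound fields of the submitted rule set are dropped here.)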
+ f.fw[fwID] = firewall + resp, err := json.Marshal(firewall) if err != nil { f.t.Fatal(err) } _, _ = w.Write(resp) return - } else if strings.Contains(urlPath, "nodebalancer") { - parts := strings.Split(urlPath[1:], "/") - nbuo := new(linodego.NodeBalancerUpdateOptions) - if err := json.NewDecoder(r.Body).Decode(nbuo); err != nil { - f.t.Fatal(err) - } - if _, err := strconv.Atoi(parts[1]); err != nil { - f.t.Fatal(err) - } + } - if nb, found := f.nb[parts[1]]; found { - if nbuo.ClientConnThrottle != nil { - nb.ClientConnThrottle = *nbuo.ClientConnThrottle - } - if nbuo.Label != nil { - nb.Label = nbuo.Label - } - if nbuo.Tags != nil { - nb.Tags = *nbuo.Tags - } + w.WriteHeader(404) + resp := linodego.APIError{ + Errors: []linodego.APIErrorReason{ + {Reason: "Not Found"}, + }, + } + rr, _ := json.Marshal(resp) + _, _ = w.Write(rr) + }) - f.nb[strconv.Itoa(nb.ID)] = nb - resp, err := json.Marshal(nb) - if err != nil { - f.t.Fatal(err) - } - _, _ = w.Write(resp) - return - } + f.mux.HandleFunc("PUT /v4/nodebalancers/{nodeBalancerId}", func(w http.ResponseWriter, r *http.Request) { + nbuo := new(linodego.NodeBalancerUpdateOptions) + if err := json.NewDecoder(r.Body).Decode(nbuo); err != nil { + f.t.Fatal(err) + } + if _, err := strconv.Atoi(r.PathValue("nodeBalancerId")); err != nil { + f.t.Fatal(err) + } - w.WriteHeader(404) - resp := linodego.APIError{ - Errors: []linodego.APIErrorReason{ - {Reason: "Not Found"}, - }, + if nb, found := f.nb[r.PathValue("nodeBalancerId")]; found { + if nbuo.ClientConnThrottle != nil { + nb.ClientConnThrottle = *nbuo.ClientConnThrottle } - rr, _ := json.Marshal(resp) - _, _ = w.Write(rr) - - } else if strings.Contains(urlPath, "firewalls") { - // path is networking/firewalls/%d/rules - parts := strings.Split(urlPath[1:], "/") - fwrs := new(linodego.FirewallRuleSet) - if err := json.NewDecoder(r.Body).Decode(fwrs); err != nil { - f.t.Fatal(err) + if nbuo.Label != nil { + nb.Label = nbuo.Label + } + if nbuo.Tags != nil { + nb.Tags = *nbuo.Tags } - fwID, err := strconv.Atoi(parts[2]) + f.nb[strconv.Itoa(nb.ID)] = nb + resp, err := json.Marshal(nb) if err != nil { f.t.Fatal(err) } + _, _ = w.Write(resp) + return + } - if firewall, found := f.fw[fwID]; found { - firewall.Rules.Inbound = fwrs.Inbound - firewall.Rules.InboundPolicy = fwrs.InboundPolicy - // outbound rules do not apply, ignoring. 
- f.fw[fwID] = firewall - resp, err := json.Marshal(firewall) - if err != nil { - f.t.Fatal(err) - } - _, _ = w.Write(resp) - return - } - - w.WriteHeader(404) - resp := linodego.APIError{ - Errors: []linodego.APIErrorReason{ - {Reason: "Not Found"}, - }, - } - rr, _ := json.Marshal(resp) - _, _ = w.Write(rr) + w.WriteHeader(404) + resp := linodego.APIError{ + Errors: []linodego.APIErrorReason{ + {Reason: "Not Found"}, + }, } - } + rr, _ := json.Marshal(resp) + _, _ = w.Write(rr) + }) +} + +func (f *fakeAPI) ServeHTTP(w http.ResponseWriter, r *http.Request) { + log.Printf("fakeAPI: %s %s", r.Method, r.URL.Path) + + urlPath := strings.TrimPrefix(r.URL.Path, "/"+apiVersion) + f.recordRequest(r, urlPath) + + w.Header().Add("Content-Type", "application/json") + f.mux.ServeHTTP(w, r) } func createFirewallDevice(fwId int, f *fakeAPI, fdco linodego.FirewallDeviceCreateOptions) linodego.FirewallDevice { diff --git a/cloud/linode/firewall/firewalls.go b/cloud/linode/firewall/firewalls.go new file mode 100644 index 00000000..e818dbfe --- /dev/null +++ b/cloud/linode/firewall/firewalls.go @@ -0,0 +1,504 @@ +package firewall + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "strconv" + "strings" + + "golang.org/x/exp/slices" + + "github.com/linode/linodego" + v1 "k8s.io/api/core/v1" + "k8s.io/klog/v2" + + "github.com/linode/linode-cloud-controller-manager/cloud/annotations" + "github.com/linode/linode-cloud-controller-manager/cloud/linode/client" +) + +const ( + maxFirewallRuleLabelLen = 32 + maxIPsPerFirewall = 255 + maxRulesPerFirewall = 25 +) + +var ( + ErrTooManyIPs = errors.New("too many IPs in this ACL, will exceed rules per firewall limit") + ErrTooManyNBFirewalls = errors.New("too many firewalls attached to a nodebalancer") + ErrInvalidFWConfig = errors.New("specify either an allowList or a denyList for a firewall") +) + +type LinodeClient struct { + Client client.Client +} + +type aclConfig struct { + AllowList *linodego.NetworkAddresses `json:"allowList"` + DenyList *linodego.NetworkAddresses `json:"denyList"` +} + +func (l *LinodeClient) CreateFirewall(ctx context.Context, opts linodego.FirewallCreateOptions) (fw *linodego.Firewall, err error) { + return l.Client.CreateFirewall(ctx, opts) +} + +func (l *LinodeClient) DeleteFirewall(ctx context.Context, firewall *linodego.Firewall) error { + fwDevices, err := l.Client.ListFirewallDevices(ctx, firewall.ID, &linodego.ListOptions{}) + if err != nil { + klog.Errorf("Error in listing firewall devices: %v", err) + return err + } + if len(fwDevices) > 1 { + klog.Errorf("Found more than one device attached to firewall ID: %d, devices: %+v. Skipping delete of firewall", firewall.ID, fwDevices) + return nil + } + return l.Client.DeleteFirewall(ctx, firewall.ID) +} + +func (l *LinodeClient) DeleteNodeBalancerFirewall( + ctx context.Context, + service *v1.Service, + nb *linodego.NodeBalancer, +) error { + _, fwACLExists := service.GetAnnotations()[annotations.AnnLinodeCloudFirewallACL] + if fwACLExists { // if an ACL exists, check if firewall exists and delete it. 
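+		// Note: DeleteFirewall above deletes the firewall itself only when the
+		// NodeBalancer is its sole attached device; a firewall shared with other
+		// entities is left in place.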
+		firewalls, err := l.Client.ListNodeBalancerFirewalls(ctx, nb.ID, &linodego.ListOptions{})
+		if err != nil {
+			return err
+		}
+
+		switch len(firewalls) {
+		case 0:
+			klog.Info("No firewall attached to nodebalancer, nothing to clean")
+		case 1:
+			return l.DeleteFirewall(ctx, &firewalls[0])
+		default:
+			klog.Errorf("Found more than one firewall attached to nodebalancer: %d, firewall IDs: %v", nb.ID, firewalls)
+			return ErrTooManyNBFirewalls
+		}
+	}
+
+	return nil
+}
+
+func ipsChanged(ips *linodego.NetworkAddresses, rules []linodego.FirewallRule) bool {
+	var ruleIPv4s []string
+	var ruleIPv6s []string
+
+	for _, rule := range rules {
+		if rule.Addresses.IPv4 != nil {
+			ruleIPv4s = append(ruleIPv4s, *rule.Addresses.IPv4...)
+		}
+		if rule.Addresses.IPv6 != nil {
+			ruleIPv6s = append(ruleIPv6s, *rule.Addresses.IPv6...)
+		}
+	}
+
+	if len(ruleIPv4s) > 0 && ips.IPv4 == nil {
+		return true
+	}
+
+	if len(ruleIPv6s) > 0 && ips.IPv6 == nil {
+		return true
+	}
+
+	if ips.IPv4 != nil {
+		if len(*ips.IPv4) != len(ruleIPv4s) {
+			return true
+		}
+		for _, ipv4 := range *ips.IPv4 {
+			if !slices.Contains(ruleIPv4s, ipv4) {
+				return true
+			}
+		}
+	}
+
+	if ips.IPv6 != nil {
+		if len(*ips.IPv6) != len(ruleIPv6s) {
+			return true
+		}
+		for _, ipv6 := range *ips.IPv6 {
+			if !slices.Contains(ruleIPv6s, ipv6) {
+				return true
+			}
+		}
+	}
+
+	return false
+}
+
+// ruleChanged takes an old FirewallRuleSet and a new aclConfig and returns whether
+// the IPs of the FirewallRuleSet would be changed by the new ACL config.
+func ruleChanged(old linodego.FirewallRuleSet, newACL aclConfig) bool {
+	var ips *linodego.NetworkAddresses
+	if newACL.AllowList != nil {
+		// this is an allowList, meaning the rules should have `DROP` as the inbound policy
+		if old.InboundPolicy != "DROP" {
+			return true
+		}
+		if (newACL.AllowList.IPv4 != nil || newACL.AllowList.IPv6 != nil) && len(old.Inbound) == 0 {
+			return true
+		}
+		ips = newACL.AllowList
+	}
+
+	if newACL.DenyList != nil {
+		if old.InboundPolicy != "ACCEPT" {
+			return true
+		}
+
+		if (newACL.DenyList.IPv4 != nil || newACL.DenyList.IPv6 != nil) && len(old.Inbound) == 0 {
+			return true
+		}
+		ips = newACL.DenyList
+	}
+
+	return ipsChanged(ips, old.Inbound)
+}
+
+func chunkIPs(ips []string) [][]string {
+	chunks := [][]string{}
+	ipCount := len(ips)
+
+	// If the number of IPs is less than or equal to maxIPsPerFirewall,
+	// return a single chunk containing all IPs.
+	if ipCount <= maxIPsPerFirewall {
+		return [][]string{ips}
+	}
+
+	// Otherwise, break the IPs into chunks with maxIPsPerFirewall IPs per chunk.
+	chunkCount := 0
+	for ipCount > maxIPsPerFirewall {
+		start := chunkCount * maxIPsPerFirewall
+		end := (chunkCount + 1) * maxIPsPerFirewall
+		chunks = append(chunks, ips[start:end])
+		chunkCount++
+		ipCount -= maxIPsPerFirewall
+	}
+
+	// Append the remaining IPs as a chunk.
+	chunks = append(chunks, ips[chunkCount*maxIPsPerFirewall:])
+
+	return chunks
+}
+
+// processACL takes the IPs, aclType, label, etc. and formats them into the passed linodego.FirewallCreateOptions pointer.
+func processACL(fwcreateOpts *linodego.FirewallCreateOptions, aclType, label, svcName, ports string, ips linodego.NetworkAddresses) error {
+	ruleLabel := fmt.Sprintf("%s-%s", aclType, svcName)
+	if len(ruleLabel) > maxFirewallRuleLabelLen {
+		newLabel := ruleLabel[0:maxFirewallRuleLabelLen]
+		klog.Infof("Firewall label '%s' is too long. 
Stripping to '%s'", ruleLabel, newLabel)
+		ruleLabel = newLabel
+	}
+
+	// Linode limits each firewall rule to a maximum of 255 IPs (maxIPsPerFirewall).
+	var ipv4s, ipv6s []string // doing this to avoid dereferencing a nil pointer
+	if ips.IPv6 != nil {
+		ipv6s = *ips.IPv6
+	}
+	if ips.IPv4 != nil {
+		ipv4s = *ips.IPv4
+	}
+
+	if len(ipv4s)+len(ipv6s) > maxIPsPerFirewall {
+		ipv4chunks := chunkIPs(ipv4s)
+		for i, chunk := range ipv4chunks {
+			v4chunk := chunk
+			fwcreateOpts.Rules.Inbound = append(fwcreateOpts.Rules.Inbound, linodego.FirewallRule{
+				Action:      aclType,
+				Label:       ruleLabel,
+				Description: fmt.Sprintf("Rule %d, Created by linode-ccm: %s, for %s", i, label, svcName),
+				Protocol:    linodego.TCP, // Nodebalancers support only TCP.
+				Ports:       ports,
+				Addresses:   linodego.NetworkAddresses{IPv4: &v4chunk},
+			})
+		}
+
+		ipv6chunks := chunkIPs(ipv6s)
+		for i, chunk := range ipv6chunks {
+			v6chunk := chunk
+			fwcreateOpts.Rules.Inbound = append(fwcreateOpts.Rules.Inbound, linodego.FirewallRule{
+				Action:      aclType,
+				Label:       ruleLabel,
+				Description: fmt.Sprintf("Rule %d, Created by linode-ccm: %s, for %s", i, label, svcName),
+				Protocol:    linodego.TCP, // Nodebalancers support only TCP.
+				Ports:       ports,
+				Addresses:   linodego.NetworkAddresses{IPv6: &v6chunk},
+			})
+		}
+	} else {
+		fwcreateOpts.Rules.Inbound = append(fwcreateOpts.Rules.Inbound, linodego.FirewallRule{
+			Action:      aclType,
+			Label:       ruleLabel,
+			Description: fmt.Sprintf("Created by linode-ccm: %s, for %s", label, svcName),
+			Protocol:    linodego.TCP, // Nodebalancers support only TCP.
+			Ports:       ports,
+			Addresses:   ips,
+		})
+	}
+
+	fwcreateOpts.Rules.OutboundPolicy = "ACCEPT"
+	if aclType == "ACCEPT" {
+		// if an allowlist is present, we drop everything else.
+		fwcreateOpts.Rules.InboundPolicy = "DROP"
+	} else {
+		// if a denylist is present, we accept everything else.
+		fwcreateOpts.Rules.InboundPolicy = "ACCEPT"
+	}
+
+	if len(fwcreateOpts.Rules.Inbound) > maxRulesPerFirewall {
+		return ErrTooManyIPs
+	}
+	return nil
+}
+
+// UpdateNodeBalancerFirewall reconciles the firewall attached to the nodebalancer.
+//
+// This function does the following:
+// 1. If a firewallID annotation is present, it checks if the nodebalancer has a firewall attached, and if it matches the annotationID
+// a. If the IDs match, nothing to do here.
+// b. If they don't match, the nb is attached to the new firewall and removed from the old one.
+// 2. If a firewallACL annotation is present,
+// a. it checks if the nodebalancer has a firewall attached; if a fw exists, it updates its rules
+// b. if a fw does not exist, it creates one
+// 3. If neither of these annotations is present,
+// a. AND if no firewalls are attached to the nodebalancer, nothing to do.
+// b. if the NB has ONE firewall attached, remove it from the nb, and clean up if nothing else is attached to it
+// c. if more than one fw is attached to it, that's a problem; return an error
+// 4. If both these annotations are present, the firewallID takes precedence, and the ACL annotation is ignored.
+//
+// If a user creates a fw externally and then switches to using an ACL, the CCM will take over the fw that's attached to the nodebalancer.
+func (l *LinodeClient) UpdateNodeBalancerFirewall(
+	ctx context.Context,
+	loadBalancerName string,
+	loadBalancerTags []string,
+	service *v1.Service,
+	nb *linodego.NodeBalancer,
+) error {
+	// get the new firewall id from the annotation (if any).
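+	// Handling the ID annotation first is what gives it precedence over the
+	// ACL annotation (case 4 in the contract above).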
+	_, fwIDExists := service.GetAnnotations()[annotations.AnnLinodeCloudFirewallID]
+	if fwIDExists { // If an ID exists, we ignore everything else and handle just that
+		return l.updateServiceFirewall(ctx, service, nb)
+	}
+
+	// See if an ACL exists
+	_, fwACLExists := service.GetAnnotations()[annotations.AnnLinodeCloudFirewallACL]
+	if fwACLExists { // if an ACL exists, but no ID, just update the ACL on the fw.
+		return l.updateNodeBalancerFirewallWithACL(ctx, loadBalancerName, loadBalancerTags, service, nb)
+	}
+
+	// No firewall ID or ACL annotation, see if there are firewalls attached to our nb
+	firewalls, err := l.Client.ListNodeBalancerFirewalls(ctx, nb.ID, &linodego.ListOptions{})
+	if err != nil {
+		return err
+	}
+
+	if len(firewalls) == 0 {
+		return nil
+	}
+	if len(firewalls) > 1 {
+		klog.Errorf("Found more than one firewall attached to nodebalancer: %d, firewall IDs: %v", nb.ID, firewalls)
+		return ErrTooManyNBFirewalls
+	}
+	deviceID, deviceExists, err := l.getNodeBalancerDeviceID(ctx, firewalls[0].ID, nb.ID)
+	if err != nil {
+		return err
+	}
+	if deviceExists {
+		err = l.Client.DeleteFirewallDevice(ctx, firewalls[0].ID, deviceID)
+		if err != nil {
+			return err
+		}
+	}
+
+	// once we delete the device, we should see if there's anything attached to that firewall
+	devices, err := l.Client.ListFirewallDevices(ctx, firewalls[0].ID, &linodego.ListOptions{})
+	if err != nil {
+		return err
+	}
+
+	if len(devices) == 0 {
+		// nothing attached to it, clean it up
+		return l.Client.DeleteFirewall(ctx, firewalls[0].ID)
+	}
+	// else let that firewall linger, don't mess with it.
+
+	return nil
+}
+
+// getNodeBalancerDeviceID gets the deviceID of the nodeBalancer that is attached to the firewall.
+func (l *LinodeClient) getNodeBalancerDeviceID(ctx context.Context, firewallID, nbID int) (int, bool, error) {
+	devices, err := l.Client.ListFirewallDevices(ctx, firewallID, &linodego.ListOptions{})
+	if err != nil {
+		return 0, false, err
+	}
+
+	if len(devices) == 0 {
+		return 0, false, nil
+	}
+
+	for _, device := range devices {
+		if device.Entity.ID == nbID {
+			return device.ID, true, nil
+		}
+	}
+
+	return 0, false, nil
+}
+
+// Updates a service that has a firewallID annotation set.
+// If the annotation is set and the nodebalancer has a firewall that matches the ID, there is nothing to do.
+// If there's more than one firewall attached to the node-balancer, an error is returned, as it's not a supported use case.
+// If there's only one firewall attached and it doesn't match what's in the annotation, the new firewall is attached and the old one removed.
+func (l *LinodeClient) updateServiceFirewall(ctx context.Context, service *v1.Service, nb *linodego.NodeBalancer) error {
+	var newFirewallID int
+	var err error
+
+	// See if a firewall is attached to the nodebalancer first.
+	firewalls, err := l.Client.ListNodeBalancerFirewalls(ctx, nb.ID, &linodego.ListOptions{})
+	if err != nil {
+		return err
+	}
+	if len(firewalls) > 1 {
+		klog.Errorf("Found more than one firewall attached to nodebalancer: %d, firewall IDs: %v", nb.ID, firewalls)
+		return ErrTooManyNBFirewalls
+	}
+
+	// get the ID of the firewall that is already attached to the nodeBalancer, if we have one.
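+	// When the IDs differ below, the new firewall is attached before the old
+	// device is removed, so the NodeBalancer is never left unprotected mid-switch.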
+ var existingFirewallID int + if len(firewalls) == 1 { + existingFirewallID = firewalls[0].ID + } + + fwID := service.GetAnnotations()[annotations.AnnLinodeCloudFirewallID] + newFirewallID, err = strconv.Atoi(fwID) + if err != nil { + return err + } + // if existing firewall and new firewall differs, attach the new firewall and remove the old. + if existingFirewallID != newFirewallID { + // attach new firewall. + _, err = l.Client.CreateFirewallDevice(ctx, newFirewallID, linodego.FirewallDeviceCreateOptions{ + ID: nb.ID, + Type: "nodebalancer", + }) + if err != nil { + return err + } + // remove the existing firewall if it exists + if existingFirewallID != 0 { + deviceID, deviceExists, err := l.getNodeBalancerDeviceID(ctx, existingFirewallID, nb.ID) + if err != nil { + return err + } + + if !deviceExists { + return fmt.Errorf("error in fetching attached nodeBalancer device") + } + + if err = l.Client.DeleteFirewallDevice(ctx, existingFirewallID, deviceID); err != nil { + return err + } + } + } + return nil +} + +func (l *LinodeClient) updateNodeBalancerFirewallWithACL( + ctx context.Context, + loadBalancerName string, + loadBalancerTags []string, + service *v1.Service, + nb *linodego.NodeBalancer, +) error { + // See if a firewall is attached to the nodebalancer first. + firewalls, err := l.Client.ListNodeBalancerFirewalls(ctx, nb.ID, &linodego.ListOptions{}) + if err != nil { + return err + } + + switch len(firewalls) { + case 0: + { + // need to create a fw and attach it to our nb + fwcreateOpts, err := CreateFirewallOptsForSvc(loadBalancerName, loadBalancerTags, service) + if err != nil { + return err + } + + fw, err := l.Client.CreateFirewall(ctx, *fwcreateOpts) + if err != nil { + return err + } + // attach new firewall. + if _, err = l.Client.CreateFirewallDevice(ctx, fw.ID, linodego.FirewallDeviceCreateOptions{ + ID: nb.ID, + Type: "nodebalancer", + }); err != nil { + return err + } + } + case 1: + { + // We do not want to get into the complexity of reconciling differences, might as well just pull what's in the svc annotation now and update the fw. 
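+			// For reference, the annotation value parsed here is a JSON-encoded
+			// aclConfig; an illustrative payload (example values only) looks like:
+			//   {"allowList": {"ipv4": ["203.0.113.0/24"], "ipv6": ["2001:db8::/64"]}}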
+ var acl aclConfig + err := json.Unmarshal([]byte(service.GetAnnotations()[annotations.AnnLinodeCloudFirewallACL]), &acl) + if err != nil { + return err + } + + changed := ruleChanged(firewalls[0].Rules, acl) + if !changed { + return nil + } + + fwCreateOpts, err := CreateFirewallOptsForSvc(service.Name, []string{""}, service) + if err != nil { + return err + } + if _, err = l.Client.UpdateFirewallRules(ctx, firewalls[0].ID, fwCreateOpts.Rules); err != nil { + return err + } + } + default: + klog.Errorf("Found more than one firewall attached to nodebalancer: %d, firewall IDs: %v", nb.ID, firewalls) + return ErrTooManyNBFirewalls + } + return nil +} + +func CreateFirewallOptsForSvc(label string, tags []string, svc *v1.Service) (*linodego.FirewallCreateOptions, error) { + // Fetch acl from annotation + aclString := svc.GetAnnotations()[annotations.AnnLinodeCloudFirewallACL] + fwcreateOpts := linodego.FirewallCreateOptions{ + Label: label, + Tags: tags, + } + servicePorts := make([]string, 0, len(svc.Spec.Ports)) + for _, port := range svc.Spec.Ports { + servicePorts = append(servicePorts, strconv.Itoa(int(port.Port))) + } + + portsString := strings.Join(servicePorts[:], ",") + var acl aclConfig + if err := json.Unmarshal([]byte(aclString), &acl); err != nil { + return nil, err + } + // it is a problem if both are set, or if both are not set + if (acl.AllowList != nil && acl.DenyList != nil) || (acl.AllowList == nil && acl.DenyList == nil) { + return nil, ErrInvalidFWConfig + } + + aclType := "ACCEPT" + allowedIPs := acl.AllowList + if acl.DenyList != nil { + aclType = "DROP" + allowedIPs = acl.DenyList + } + + if err := processACL(&fwcreateOpts, aclType, label, svc.Name, portsString, *allowedIPs); err != nil { + return nil, err + } + return &fwcreateOpts, nil +} diff --git a/cloud/linode/health_check.go b/cloud/linode/health_check.go new file mode 100644 index 00000000..dc0d0e30 --- /dev/null +++ b/cloud/linode/health_check.go @@ -0,0 +1,58 @@ +package linode + +import ( + "context" + "time" + + "github.com/linode/linode-cloud-controller-manager/cloud/linode/client" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/klog/v2" +) + +type healthChecker struct { + period time.Duration + linodeClient client.Client + stopCh chan<- struct{} +} + +func newHealthChecker(client client.Client, period time.Duration, stopCh chan<- struct{}) *healthChecker { + return &healthChecker{ + period: period, + linodeClient: client, + stopCh: stopCh, + } +} + +func (r *healthChecker) Run(stopCh <-chan struct{}) { + ctx := wait.ContextForChannel(stopCh) + wait.Until(r.worker(ctx), r.period, stopCh) +} + +func (r *healthChecker) worker(ctx context.Context) func() { + return func() { + r.do(ctx) + } +} + +func (r *healthChecker) do(ctx context.Context) { + if r.stopCh == nil { + klog.Errorf("stop signal already fired. 
nothing to do") + return + } + + authenticated, err := client.CheckClientAuthenticated(ctx, r.linodeClient) + if err != nil { + klog.Warningf("unable to determine linode client authentication status: %s", err.Error()) + return + } + + if !authenticated { + klog.Error("detected invalid linode api token: stopping controllers") + + close(r.stopCh) + r.stopCh = nil + return + } + + klog.Info("linode api token is healthy") +} diff --git a/cloud/linode/health_check_test.go b/cloud/linode/health_check_test.go new file mode 100644 index 00000000..bd1570d0 --- /dev/null +++ b/cloud/linode/health_check_test.go @@ -0,0 +1,138 @@ +package linode + +import ( + "testing" + "time" + + "github.com/golang/mock/gomock" + "github.com/linode/linode-cloud-controller-manager/cloud/linode/client/mocks" + "github.com/linode/linodego" +) + +func TestHealthCheck(t *testing.T) { + testCases := []struct { + name string + f func(*testing.T, *mocks.MockClient) + }{ + { + name: "Test succeeding calls to linode api stop signal is not fired", + f: testSucceedingCallsToLinodeAPIHappenStopSignalNotFired, + }, + { + name: "Test Unauthorized calls to linode api stop signal is fired", + f: testFailingCallsToLinodeAPIHappenStopSignalFired, + }, + { + name: "Test failing calls to linode api stop signal is not fired", + f: testErrorCallsToLinodeAPIHappenStopSignalNotFired, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + client := mocks.NewMockClient(ctrl) + tc.f(t, client) + }) + } +} + +func testSucceedingCallsToLinodeAPIHappenStopSignalNotFired(t *testing.T, client *mocks.MockClient) { + writableStopCh := make(chan struct{}) + readableStopCh := make(chan struct{}) + + client.EXPECT().GetProfile(gomock.Any()).Times(2).Return(&linodego.Profile{}, nil) + + hc := newHealthChecker(client, 1*time.Second, writableStopCh) + + defer close(readableStopCh) + go hc.Run(readableStopCh) + + // wait for two checks to happen + time.Sleep(1500 * time.Millisecond) + + select { + case <-writableStopCh: + t.Error("healthChecker sent stop signal") + default: + } +} + +func testFailingCallsToLinodeAPIHappenStopSignalFired(t *testing.T, client *mocks.MockClient) { + writableStopCh := make(chan struct{}) + readableStopCh := make(chan struct{}) + + client.EXPECT().GetProfile(gomock.Any()).Times(1).Return(&linodego.Profile{}, nil) + + hc := newHealthChecker(client, 1*time.Second, writableStopCh) + + defer close(readableStopCh) + go hc.Run(readableStopCh) + + // wait for check to happen + time.Sleep(500 * time.Millisecond) + + select { + case <-writableStopCh: + t.Error("healthChecker sent stop signal") + default: + } + + // invalidate token + client.EXPECT().GetProfile(gomock.Any()).Times(1).Return(&linodego.Profile{}, &linodego.Error{Code: 401, Message: "Invalid Token"}) + + // wait for check to happen + time.Sleep(1 * time.Second) + + select { + case <-writableStopCh: + default: + t.Error("healthChecker did not send stop signal") + } +} + +func testErrorCallsToLinodeAPIHappenStopSignalNotFired(t *testing.T, client *mocks.MockClient) { + writableStopCh := make(chan struct{}) + readableStopCh := make(chan struct{}) + + client.EXPECT().GetProfile(gomock.Any()).Times(1).Return(&linodego.Profile{}, nil) + + hc := newHealthChecker(client, 1*time.Second, writableStopCh) + + defer close(readableStopCh) + go hc.Run(readableStopCh) + + // wait for check to happen + time.Sleep(500 * time.Millisecond) + + select { + case <-writableStopCh: + t.Error("healthChecker sent 
stop signal") + default: + } + + // simulate server error + client.EXPECT().GetProfile(gomock.Any()).Times(1).Return(&linodego.Profile{}, &linodego.Error{Code: 500}) + + // wait for check to happen + time.Sleep(1 * time.Second) + + select { + case <-writableStopCh: + t.Error("healthChecker sent stop signal") + default: + } + + client.EXPECT().GetProfile(gomock.Any()).Times(1).Return(&linodego.Profile{}, nil) + + // wait for check to happen + time.Sleep(1 * time.Second) + + select { + case <-writableStopCh: + t.Error("healthChecker sent stop signal") + default: + } +} diff --git a/cloud/linode/instances.go b/cloud/linode/instances.go index 26c762d0..c93a9fa7 100644 --- a/cloud/linode/instances.go +++ b/cloud/linode/instances.go @@ -4,27 +4,67 @@ import ( "context" "fmt" "os" + "slices" "strconv" + "strings" "sync" "time" - "github.com/linode/linode-cloud-controller-manager/sentry" "github.com/linode/linodego" v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" cloudprovider "k8s.io/cloud-provider" + "k8s.io/klog/v2" + + "github.com/linode/linode-cloud-controller-manager/cloud/linode/client" + "github.com/linode/linode-cloud-controller-manager/sentry" ) +type nodeIP struct { + ip string + ipType v1.NodeAddressType +} + +type linodeInstance struct { + instance *linodego.Instance + ips []nodeIP +} + type nodeCache struct { sync.RWMutex - nodes map[int]*linodego.Instance + nodes map[int]linodeInstance lastUpdate time.Time ttl time.Duration } +// getInstanceAddresses returns all addresses configured on a linode. +func (nc *nodeCache) getInstanceAddresses(instance linodego.Instance, vpcips []string) []nodeIP { + ips := []nodeIP{} + + // If vpc ips are present, list them first + for _, ip := range vpcips { + ipType := v1.NodeInternalIP + ips = append(ips, nodeIP{ip: ip, ipType: ipType}) + } + + for _, ip := range instance.IPv4 { + ipType := v1.NodeExternalIP + if isPrivate(ip) { + ipType = v1.NodeInternalIP + } + ips = append(ips, nodeIP{ip: ip.String(), ipType: ipType}) + } + + if instance.IPv6 != "" { + ips = append(ips, nodeIP{ip: strings.TrimSuffix(instance.IPv6, "/128"), ipType: v1.NodeExternalIP}) + } + + return ips +} + // refreshInstances conditionally loads all instances from the Linode API and caches them. // It does not refresh if the last update happened less than `nodeCache.ttl` ago. -func (nc *nodeCache) refreshInstances(ctx context.Context, client Client) error { +func (nc *nodeCache) refreshInstances(ctx context.Context, client client.Client) error { nc.Lock() defer nc.Unlock() @@ -36,32 +76,64 @@ func (nc *nodeCache) refreshInstances(ctx context.Context, client Client) error if err != nil { return err } - nc.nodes = make(map[int]*linodego.Instance) - for _, instance := range instances { - instance := instance - nc.nodes[instance.ID] = &instance + + // If running within VPC, find instances and store their ips + vpcNodes := map[int][]string{} + vpcNames := strings.Split(Options.VPCNames, ",") + for _, v := range vpcNames { + vpcName := strings.TrimSpace(v) + if vpcName == "" { + continue + } + resp, err := GetVPCIPAddresses(ctx, client, vpcName) + if err != nil { + klog.Errorf("failed updating instances cache for VPC %s. 
Error: %s", vpcName, err.Error()) + continue + } + for _, r := range resp { + if r.Address == nil { + continue + } + vpcNodes[r.LinodeID] = append(vpcNodes[r.LinodeID], *r.Address) + } + } + + newNodes := make(map[int]linodeInstance, len(instances)) + for i, instance := range instances { + + // if running within VPC, only store instances in cache which are part of VPC + if Options.VPCNames != "" && len(vpcNodes[instance.ID]) == 0 { + continue + } + node := linodeInstance{ + instance: &instances[i], + ips: nc.getInstanceAddresses(instance, vpcNodes[instance.ID]), + } + newNodes[instance.ID] = node } - nc.lastUpdate = time.Now() + nc.nodes = newNodes + nc.lastUpdate = time.Now() return nil } type instances struct { - client Client + client client.Client nodeCache *nodeCache } -func newInstances(client Client) *instances { +func newInstances(client client.Client) *instances { timeout := 15 if raw, ok := os.LookupEnv("LINODE_INSTANCE_CACHE_TTL"); ok { if t, _ := strconv.Atoi(raw); t > 0 { timeout = t } } + klog.V(3).Infof("TTL for nodeCache set to %d", timeout) return &instances{client, &nodeCache{ - nodes: make(map[int]*linodego.Instance), + nodes: make(map[int]linodeInstance, 0), ttl: time.Duration(timeout) * time.Second, }} } @@ -74,26 +146,62 @@ func (e instanceNoIPAddressesError) Error() string { return fmt.Sprintf("instance %d has no IP addresses", e.id) } -func (i *instances) linodeByName(nodeName types.NodeName) (*linodego.Instance, error) { +func (i *instances) linodeByIP(kNode *v1.Node) (*linodego.Instance, error) { i.nodeCache.RLock() defer i.nodeCache.RUnlock() + var kNodeAddresses []string + for _, address := range kNode.Status.Addresses { + if address.Type == v1.NodeExternalIP || address.Type == v1.NodeInternalIP { + kNodeAddresses = append(kNodeAddresses, address.Address) + } + } + if kNodeAddresses == nil { + return nil, fmt.Errorf("no IP address found on node %s", kNode.Name) + } for _, node := range i.nodeCache.nodes { - if node.Label == string(nodeName) { - return node, nil + for _, nodeIP := range node.instance.IPv4 { + if !isPrivate(nodeIP) && slices.Contains(kNodeAddresses, nodeIP.String()) { + return node.instance, nil + } } } return nil, cloudprovider.InstanceNotFound } +func (i *instances) linodeByName(nodeName types.NodeName) *linodego.Instance { + i.nodeCache.RLock() + defer i.nodeCache.RUnlock() + for _, node := range i.nodeCache.nodes { + if node.instance.Label == string(nodeName) { + return node.instance + } + } + + return nil +} + func (i *instances) linodeByID(id int) (*linodego.Instance, error) { i.nodeCache.RLock() defer i.nodeCache.RUnlock() - instance, ok := i.nodeCache.nodes[id] + linodeInstance, ok := i.nodeCache.nodes[id] if !ok { return nil, cloudprovider.InstanceNotFound } - return instance, nil + return linodeInstance.instance, nil +} + +// listAllInstances returns all instances in nodeCache +func (i *instances) listAllInstances(ctx context.Context) ([]linodego.Instance, error) { + if err := i.nodeCache.refreshInstances(ctx, i.client); err != nil { + return nil, err + } + + instances := []linodego.Instance{} + for _, linodeInstance := range i.nodeCache.nodes { + instances = append(instances, *linodeInstance.instance) + } + return instances, nil } func (i *instances) lookupLinode(ctx context.Context, node *v1.Node) (*linodego.Instance, error) { @@ -117,8 +225,12 @@ func (i *instances) lookupLinode(ctx context.Context, node *v1.Node) (*linodego. 
return i.linodeByID(id) } + instance := i.linodeByName(nodeName) + if instance != nil { + return instance, nil + } - return i.linodeByName(nodeName) + return i.linodeByIP(node) } func (i *instances) InstanceExists(ctx context.Context, node *v1.Node) (bool, error) { @@ -160,22 +272,44 @@ func (i *instances) InstanceMetadata(ctx context.Context, node *v1.Node) (*cloud return nil, err } - if len(linode.IPv4) == 0 { + ips, err := i.getLinodeAddresses(ctx, node) + if err != nil { + sentry.CaptureError(ctx, err) + return nil, err + } + + if len(ips) == 0 { err := instanceNoIPAddressesError{linode.ID} sentry.CaptureError(ctx, err) return nil, err } addresses := []v1.NodeAddress{{Type: v1.NodeHostName, Address: linode.Label}} + for _, ip := range ips { + addresses = append(addresses, v1.NodeAddress{Type: ip.ipType, Address: ip.ip}) + } - for _, ip := range linode.IPv4 { - ipType := v1.NodeExternalIP - if ip.IsPrivate() { - ipType = v1.NodeInternalIP + // create temporary uniqueAddrs cache just for reference + uniqueAddrs := make(map[string]v1.NodeAddressType, len(node.Status.Addresses)+len(ips)) + for _, ip := range addresses { + if _, ok := uniqueAddrs[ip.Address]; ok { + continue } - addresses = append(addresses, v1.NodeAddress{Type: ipType, Address: ip.String()}) + uniqueAddrs[ip.Address] = ip.Type } + // include IPs set by kubelet for internal node IP + for _, addr := range node.Status.Addresses { + if _, ok := uniqueAddrs[addr.Address]; ok { + continue + } + if addr.Type == v1.NodeInternalIP { + uniqueAddrs[addr.Address] = v1.NodeInternalIP + addresses = append(addresses, v1.NodeAddress{Type: addr.Type, Address: addr.Address}) + } + } + + klog.Infof("Instance %s, assembled IP addresses: %v", node.Name, addresses) // note that Zone is omitted as it's not a thing in Linode meta := &cloudprovider.InstanceMetadata{ ProviderID: fmt.Sprintf("%v%v", providerIDPrefix, linode.ID), @@ -186,3 +320,23 @@ func (i *instances) InstanceMetadata(ctx context.Context, node *v1.Node) (*cloud return meta, nil } + +func (i *instances) getLinodeAddresses(ctx context.Context, node *v1.Node) ([]nodeIP, error) { + ctx = sentry.SetHubOnContext(ctx) + instance, err := i.lookupLinode(ctx, node) + if err != nil { + sentry.CaptureError(ctx, err) + return nil, err + } + + i.nodeCache.RLock() + defer i.nodeCache.RUnlock() + linodeInstance, ok := i.nodeCache.nodes[instance.ID] + if !ok || len(linodeInstance.ips) == 0 { + err := instanceNoIPAddressesError{instance.ID} + sentry.CaptureError(ctx, err) + return nil, err + } + + return linodeInstance.ips, nil +} diff --git a/cloud/linode/instances_test.go b/cloud/linode/instances_test.go index a93711a1..6cd6e183 100644 --- a/cloud/linode/instances_test.go +++ b/cloud/linode/instances_test.go @@ -4,10 +4,13 @@ import ( "context" "fmt" "net" + "slices" "strconv" + "strings" "testing" "github.com/golang/mock/gomock" + "github.com/linode/linode-cloud-controller-manager/cloud/linode/client/mocks" "github.com/linode/linodego" "github.com/stretchr/testify/assert" v1 "k8s.io/api/core/v1" @@ -30,7 +33,7 @@ func TestInstanceExists(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() - client := NewMockClient(ctrl) + client := mocks.NewMockClient(ctrl) t.Run("should return false if linode does not exist (by providerID)", func(t *testing.T) { instances := newInstances(client) @@ -42,17 +45,6 @@ func TestInstanceExists(t *testing.T) { assert.False(t, exists) }) - t.Run("should return false if linode does not exist (by name)", func(t *testing.T) { - instances := 
newInstances(client) - name := "some-name" - node := nodeWithName(name) - client.EXPECT().ListInstances(gomock.Any(), nil).Times(1).Return([]linodego.Instance{}, nil) - - exists, err := instances.InstanceExists(ctx, node) - assert.NoError(t, err) - assert.False(t, exists) - }) - t.Run("should return true if linode exists (by provider)", func(t *testing.T) { instances := newInstances(client) node := nodeWithProviderID(providerIDPrefix + "123") @@ -90,17 +82,20 @@ func TestMetadataRetrieval(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() - client := NewMockClient(ctrl) + client := mocks.NewMockClient(ctrl) - t.Run("errors when linode does not exist (by name)", func(t *testing.T) { + t.Run("uses name over IP for finding linode", func(t *testing.T) { instances := newInstances(client) - name := "does-not-exist" + publicIP := net.ParseIP("172.234.31.123") + privateIP := net.ParseIP("192.168.159.135") + expectedInstance := linodego.Instance{Label: "expected-instance", ID: 12345, IPv4: []*net.IP{&publicIP, &privateIP}} + client.EXPECT().ListInstances(gomock.Any(), nil).Times(1).Return([]linodego.Instance{{Label: "wrong-instance", ID: 3456, IPv4: []*net.IP{&publicIP, &privateIP}}, expectedInstance}, nil) + name := "expected-instance" node := nodeWithName(name) - client.EXPECT().ListInstances(gomock.Any(), nil).Times(1).Return([]linodego.Instance{}, nil) meta, err := instances.InstanceMetadata(ctx, node) - assert.ErrorIs(t, err, cloudprovider.InstanceNotFound) - assert.Nil(t, meta) + assert.Nil(t, err) + assert.Equal(t, providerIDPrefix+strconv.Itoa(expectedInstance.ID), meta.ProviderID) }) t.Run("fails when linode does not exist (by provider)", func(t *testing.T) { @@ -133,7 +128,7 @@ func TestMetadataRetrieval(t *testing.T) { assert.Equal(t, providerIDPrefix+strconv.Itoa(id), meta.ProviderID) assert.Equal(t, region, meta.Region) assert.Equal(t, linodeType, meta.InstanceType) - assert.Equal(t, meta.NodeAddresses, []v1.NodeAddress{ + assert.Equal(t, []v1.NodeAddress{ { Type: v1.NodeHostName, Address: name, @@ -146,46 +141,183 @@ func TestMetadataRetrieval(t *testing.T) { Type: v1.NodeInternalIP, Address: privateIPv4.String(), }, - }) + }, meta.NodeAddresses) + }) + + t.Run("should return data when linode is found (by name) and addresses must be in order", func(t *testing.T) { + instances := newInstances(client) + id := 123 + name := "mock-instance" + node := nodeWithName(name) + publicIPv4 := net.ParseIP("45.76.101.25") + privateIPv4 := net.ParseIP("192.168.133.65") + ipv6Addr := "2001::8a2e:370:7348" + linodeType := "g6-standard-1" + region := "us-east" + + Options.VPCNames = "test" + vpcIDs["test"] = 1 + Options.EnableRouteController = true + + instance := linodego.Instance{ + ID: id, + Label: name, + Type: linodeType, + Region: region, + IPv4: []*net.IP{&publicIPv4, &privateIPv4}, + IPv6: ipv6Addr, + } + vpcIP := "10.0.0.2" + addressRange1 := "10.192.0.0/24" + addressRange2 := "10.192.10.0/24" + routesInVPC := []linodego.VPCIP{ + { + Address: &vpcIP, + AddressRange: nil, + VPCID: vpcIDs["test"], + NAT1To1: nil, + LinodeID: id, + }, + { + Address: nil, + AddressRange: &addressRange1, + VPCID: vpcIDs["test"], + NAT1To1: nil, + LinodeID: id, + }, + { + Address: nil, + AddressRange: &addressRange2, + VPCID: vpcIDs["test"], + NAT1To1: nil, + LinodeID: id, + }, + } + + client.EXPECT().ListInstances(gomock.Any(), nil).Times(1).Return([]linodego.Instance{instance}, nil) + client.EXPECT().ListVPCIPAddresses(gomock.Any(), vpcIDs["test"], gomock.Any()).Return(routesInVPC, nil) + + meta, 
err := instances.InstanceMetadata(ctx, node) + assert.NoError(t, err) + assert.Equal(t, providerIDPrefix+strconv.Itoa(id), meta.ProviderID) + assert.Equal(t, region, meta.Region) + assert.Equal(t, linodeType, meta.InstanceType) + assert.Equal(t, []v1.NodeAddress{ + { + Type: v1.NodeHostName, + Address: name, + }, + { + Type: v1.NodeInternalIP, + Address: vpcIP, + }, + { + Type: v1.NodeExternalIP, + Address: publicIPv4.String(), + }, + { + Type: v1.NodeInternalIP, + Address: privateIPv4.String(), + }, + { + Type: v1.NodeExternalIP, + Address: ipv6Addr, + }, + }, meta.NodeAddresses) + + Options.VPCNames = "" }) ipTests := []struct { - name string - inputIPs []string - outputAddresses []v1.NodeAddress - expectedErr error + name string + inputIPv4s []string + inputIPv6 string + externalNetwork string + existingAddresses []v1.NodeAddress + outputAddresses []v1.NodeAddress + expectedErr error }{ - {"no IPs", nil, nil, instanceNoIPAddressesError{192910}}, + { + "no IPs", + nil, + "", + "", + nil, + nil, + instanceNoIPAddressesError{192910}, + }, { "one public, one private", []string{"32.74.121.25", "192.168.121.42"}, + "", + "", + nil, []v1.NodeAddress{{Type: v1.NodeExternalIP, Address: "32.74.121.25"}, {Type: v1.NodeInternalIP, Address: "192.168.121.42"}}, nil, }, + { + "one public ipv4, one public ipv6", + []string{"32.74.121.25"}, + "2600:3c06::f03c:94ff:fe1e:e072", + "", + nil, + []v1.NodeAddress{{Type: v1.NodeExternalIP, Address: "32.74.121.25"}, {Type: v1.NodeExternalIP, Address: "2600:3c06::f03c:94ff:fe1e:e072"}}, + nil, + }, { "one public, no private", []string{"32.74.121.25"}, + "", + "", + nil, []v1.NodeAddress{{Type: v1.NodeExternalIP, Address: "32.74.121.25"}}, nil, }, { "one private, no public", []string{"192.168.121.42"}, + "", + "", + nil, []v1.NodeAddress{{Type: v1.NodeInternalIP, Address: "192.168.121.42"}}, nil, }, { "two public addresses", []string{"32.74.121.25", "32.74.121.22"}, + "", + "", + nil, []v1.NodeAddress{{Type: v1.NodeExternalIP, Address: "32.74.121.25"}, {Type: v1.NodeExternalIP, Address: "32.74.121.22"}}, nil, }, { "two private addresses", []string{"192.168.121.42", "10.0.2.15"}, + "", + "", + nil, []v1.NodeAddress{{Type: v1.NodeInternalIP, Address: "192.168.121.42"}, {Type: v1.NodeInternalIP, Address: "10.0.2.15"}}, nil, }, + { + "two private addresses - one in network marked as external", + []string{"192.168.121.42", "10.0.2.15"}, + "", + "10.0.2.0/16", + nil, + []v1.NodeAddress{{Type: v1.NodeInternalIP, Address: "192.168.121.42"}, {Type: v1.NodeExternalIP, Address: "10.0.2.15"}}, + nil, + }, + { + "one private address, one existing internal IP set on the node", + []string{"192.168.121.42"}, + "", + "", + []v1.NodeAddress{{Type: v1.NodeInternalIP, Address: "10.0.0.1"}}, + []v1.NodeAddress{{Type: v1.NodeInternalIP, Address: "192.168.121.42"}, {Type: v1.NodeInternalIP, Address: "10.0.0.1"}}, + nil, + }, } for _, test := range ipTests { @@ -195,9 +327,16 @@ func TestMetadataRetrieval(t *testing.T) { name := "my-instance" providerID := providerIDPrefix + strconv.Itoa(id) node := nodeWithProviderID(providerID) - - ips := make([]*net.IP, 0, len(test.inputIPs)) - for _, ip := range test.inputIPs { + if test.externalNetwork == "" { + Options.LinodeExternalNetwork = nil + } else { + _, Options.LinodeExternalNetwork, _ = net.ParseCIDR(test.externalNetwork) + } + if test.existingAddresses != nil { + node.Status.Addresses = append(node.Status.Addresses, test.existingAddresses...) 
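+				// mimics addresses already present on the Node object, e.g. an
+				// internal IP that kubelet set from its --node-ip flag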
+ } + ips := make([]*net.IP, 0, len(test.inputIPv4s)) + for _, ip := range test.inputIPv4s { parsed := net.ParseIP(ip) if parsed == nil { t.Fatalf("cannot parse %v as an ipv4", ip) @@ -208,7 +347,7 @@ func TestMetadataRetrieval(t *testing.T) { linodeType := "g6-standard-1" region := "us-east" client.EXPECT().ListInstances(gomock.Any(), nil).Times(1).Return([]linodego.Instance{ - {ID: id, Label: name, Type: linodeType, Region: region, IPv4: ips}, + {ID: id, Label: name, Type: linodeType, Region: region, IPv4: ips, IPv6: test.inputIPv6}, }, nil) meta, err := instances.InstanceMetadata(ctx, node) @@ -220,9 +359,74 @@ func TestMetadataRetrieval(t *testing.T) { addresses := append([]v1.NodeAddress{ {Type: v1.NodeHostName, Address: name}, }, test.outputAddresses...) + slices.SortFunc(meta.NodeAddresses, func(a v1.NodeAddress, b v1.NodeAddress) int { + return strings.Compare(a.Address, b.Address) + }) + slices.SortFunc(addresses, func(a, b v1.NodeAddress) int { + return strings.Compare(a.Address, b.Address) + }) assert.Equal(t, meta.NodeAddresses, addresses) } }) + + getByIPTests := []struct { + name string + nodeAddresses []v1.NodeAddress + expectedErr error + }{ + {name: "gets linode by External IP", nodeAddresses: []v1.NodeAddress{{ + Type: "ExternalIP", + Address: "172.234.31.123", + }, { + Type: "InternalIP", + Address: "192.168.159.135", + }}}, + { + name: "returns error on node with only internal IP", nodeAddresses: []v1.NodeAddress{{ + Type: "ExternalIP", + Address: "123.2.1.23", + }, { + Type: "InternalIP", + Address: "192.168.159.135", + }}, + expectedErr: cloudprovider.InstanceNotFound, + }, + { + name: "returns error on no matching nodes by IP", nodeAddresses: []v1.NodeAddress{{ + Type: "ExternalIP", + Address: "123.2.1.23", + }, { + Type: "InternalIP", + Address: "192.168.10.10", + }}, + expectedErr: cloudprovider.InstanceNotFound, + }, + { + name: "returns error on no node IPs", nodeAddresses: []v1.NodeAddress{}, + expectedErr: fmt.Errorf("no IP address found on node test-node-1"), + }, + } + + publicIP := net.ParseIP("172.234.31.123") + privateIP := net.ParseIP("192.168.159.135") + wrongIP := net.ParseIP("1.2.3.4") + expectedInstance := linodego.Instance{Label: "expected-instance", ID: 12345, IPv4: []*net.IP{&publicIP, &privateIP}} + + for _, test := range getByIPTests { + t.Run(fmt.Sprintf("gets linode by IP - %s", test.name), func(t *testing.T) { + instances := newInstances(client) + client.EXPECT().ListInstances(gomock.Any(), nil).Times(1).Return([]linodego.Instance{{ID: 3456, IPv4: []*net.IP{&wrongIP}}, expectedInstance}, nil) + node := v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "test-node-1"}, Status: v1.NodeStatus{Addresses: test.nodeAddresses}} + meta, err := instances.InstanceMetadata(ctx, &node) + if test.expectedErr != nil { + assert.Nil(t, meta) + assert.Equal(t, err, test.expectedErr) + } else { + assert.Nil(t, err) + assert.Equal(t, providerIDPrefix+strconv.Itoa(expectedInstance.ID), meta.ProviderID) + } + }) + } } } @@ -231,7 +435,7 @@ func TestMalformedProviders(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() - client := NewMockClient(ctrl) + client := mocks.NewMockClient(ctrl) t.Run("fails on non-numeric providerID", func(t *testing.T) { instances := newInstances(client) @@ -250,7 +454,7 @@ func TestInstanceShutdown(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() - client := NewMockClient(ctrl) + client := mocks.NewMockClient(ctrl) t.Run("fails when instance not found (by provider)", func(t *testing.T) { instances := 
newInstances(client) diff --git a/cloud/linode/loadbalancers.go b/cloud/linode/loadbalancers.go index 72d22697..cccacde2 100644 --- a/cloud/linode/loadbalancers.go +++ b/cloud/linode/loadbalancers.go @@ -12,9 +12,10 @@ import ( "strings" "time" - "golang.org/x/exp/slices" - + ciliumclient "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1" + "github.com/linode/linodego" v1 "k8s.io/api/core/v1" + k8serrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" "k8s.io/client-go/rest" @@ -22,19 +23,13 @@ import ( cloudprovider "k8s.io/cloud-provider" "k8s.io/klog/v2" + "github.com/linode/linode-cloud-controller-manager/cloud/annotations" + "github.com/linode/linode-cloud-controller-manager/cloud/linode/client" + "github.com/linode/linode-cloud-controller-manager/cloud/linode/firewall" "github.com/linode/linode-cloud-controller-manager/sentry" - "github.com/linode/linodego" ) -const ( - maxFirewallRuleLabelLen = 32 -) - -var ( - errNoNodesAvailable = errors.New("No nodes available for nodebalancer") - errInvalidFWConfig = errors.New("Specify either an allowList or a denyList for a firewall") - errTooManyFirewalls = errors.New("Too many firewalls attached to a nodebalancer") -) +var errNoNodesAvailable = errors.New("no nodes available for nodebalancer") type lbNotFoundError struct { serviceNn string @@ -49,9 +44,11 @@ func (e lbNotFoundError) Error() string { } type loadbalancers struct { - client Client - zone string - kubeClient kubernetes.Interface + client client.Client + zone string + kubeClient kubernetes.Interface + ciliumClient ciliumclient.CiliumV2alpha1Interface + loadBalancerType string } type portConfigAnnotation struct { @@ -68,12 +65,12 @@ type portConfig struct { } // newLoadbalancers returns a cloudprovider.LoadBalancer whose concrete type is a *loadbalancer. 
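+// Which backend a Service gets is decided by Options.LoadBalancerType: the
+// default path provisions a Linode NodeBalancer, while the Cilium-backed type
+// (the ciliumLBType branches below) announces a shared IP over BGP instead.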
-func newLoadbalancers(client Client, zone string) cloudprovider.LoadBalancer {
-	return &loadbalancers{client: client, zone: zone}
+func newLoadbalancers(client client.Client, zone string) cloudprovider.LoadBalancer {
+	return &loadbalancers{client: client, zone: zone, loadBalancerType: Options.LoadBalancerType}
 }
 
 func (l *loadbalancers) getNodeBalancerForService(ctx context.Context, service *v1.Service) (*linodego.NodeBalancer, error) {
-	rawID := service.GetAnnotations()[annLinodeNodeBalancerID]
+	rawID := service.GetAnnotations()[annotations.AnnLinodeNodeBalancerID]
 	id, idErr := strconv.Atoi(rawID)
 	hasIDAnn := idErr == nil && id != 0
 
@@ -119,7 +116,7 @@ func (l *loadbalancers) getNodeBalancerByStatus(ctx context.Context, service *v1
 func (l *loadbalancers) cleanupOldNodeBalancer(ctx context.Context, service *v1.Service) error {
 	// unless there's an annotation, we can never get a past and current NB to differ,
 	// because they're looked up the same way
-	if _, ok := service.GetAnnotations()[annLinodeNodeBalancerID]; !ok {
+	if _, ok := service.GetAnnotations()[annotations.AnnLinodeNodeBalancerID]; !ok {
 		return nil
 	}
 
@@ -167,6 +164,13 @@ func (l *loadbalancers) GetLoadBalancer(ctx context.Context, clusterName string,
 	sentry.SetTag(ctx, "cluster_name", clusterName)
 	sentry.SetTag(ctx, "service", service.Name)
 
+	// Handle LoadBalancers backed by Cilium
+	if l.loadBalancerType == ciliumLBType {
+		return &v1.LoadBalancerStatus{
+			Ingress: service.Status.LoadBalancer.Ingress,
+		}, true, nil
+	}
+
 	nb, err := l.getNodeBalancerForService(ctx, service)
 	switch err.(type) {
 	case nil:
@@ -191,14 +195,63 @@ func (l *loadbalancers) EnsureLoadBalancer(ctx context.Context, clusterName stri
 	ctx = sentry.SetHubOnContext(ctx)
 	sentry.SetTag(ctx, "cluster_name", clusterName)
 	sentry.SetTag(ctx, "service", service.Name)
+	serviceNn := getServiceNn(service)
 
+	// Handle LoadBalancers backed by Cilium
+	if l.loadBalancerType == ciliumLBType {
+		klog.Infof("handling LoadBalancer Service %s as %s", serviceNn, ciliumLBClass)
+
+		if err = l.ensureCiliumBGPPeeringPolicy(ctx); err != nil {
+			klog.Infof("Failed to ensure CiliumBGPPeeringPolicy: %v", err)
+			return nil, err
+		}
+
+		// check for existing CiliumLoadBalancerIPPool for service
+		pool, err := l.getCiliumLBIPPool(ctx, service)
+		if err != nil && !k8serrors.IsNotFound(err) {
+			klog.Infof("Failed to get CiliumLoadBalancerIPPool: %s", err.Error())
+			return nil, err
+		}
+		// if the CiliumLoadBalancerIPPool doesn't exist, the client returns an empty
+		// struct rather than nil, so we detect absence by checking for an empty Name
+		if pool != nil && pool.Name != "" {
+			klog.Infof("Cilium LB IP pool %s for Service %s ensured", pool.Name, serviceNn)
+			// ingress will be set by Cilium
+			return &v1.LoadBalancerStatus{
+				Ingress: service.Status.LoadBalancer.Ingress,
+			}, nil
+		}
+
+		var ipHolderSuffix string
+		if Options.IpHolderSuffix != "" {
+			ipHolderSuffix = Options.IpHolderSuffix
+			klog.Infof("using parameter-based IP Holder suffix %s for Service %s", ipHolderSuffix, serviceNn)
+		}
+
+		// CiliumLoadBalancerIPPool does not yet exist for the service
+		var sharedIP string
+		if sharedIP, err = l.createSharedIP(ctx, nodes, ipHolderSuffix); err != nil {
+			klog.Errorf("Failed to request shared instance IP: %s", err.Error())
+			return nil, err
+		}
+		if _, err = l.createCiliumLBIPPool(ctx, service, sharedIP); err != nil {
+			klog.Infof("Failed to create CiliumLoadBalancerIPPool: %s", err.Error())
+			return nil, err
+		}
+
+		// ingress will be set by Cilium
+		return 
&v1.LoadBalancerStatus{ + Ingress: service.Status.LoadBalancer.Ingress, + }, nil + } + + // Handle LoadBalancers backed by NodeBalancers var nb *linodego.NodeBalancer - serviceNn := getServiceNn(service) nb, err = l.getNodeBalancerForService(ctx, service) switch err.(type) { case lbNotFoundError: - if service.GetAnnotations()[annLinodeNodeBalancerID] != "" { + if service.GetAnnotations()[annotations.AnnLinodeNodeBalancerID] != "" { // a load balancer annotation has been created so a NodeBalancer is coming, error out and retry later klog.Infof("NodeBalancer created but not available yet, waiting...") sentry.CaptureError(ctx, err) @@ -235,276 +288,14 @@ func (l *loadbalancers) EnsureLoadBalancer(ctx context.Context, clusterName stri return lbStatus, nil } -// getNodeBalancerDeviceID gets the deviceID of the nodeBalancer that is attached to the firewall. -func (l *loadbalancers) getNodeBalancerDeviceID(ctx context.Context, firewallID, nbID int) (int, bool, error) { - devices, err := l.client.ListFirewallDevices(ctx, firewallID, &linodego.ListOptions{}) - if err != nil { - return 0, false, err - } - - if len(devices) == 0 { - return 0, false, nil - } - - for _, device := range devices { - if device.Entity.ID == nbID { - return device.ID, true, nil - } - } - - return 0, false, nil -} - -// Updates a service that has a firewallID annotation set. -// If an annotation is set, and the nodebalancer has a firewall that matches the ID, nothing to do -// If there's more than one firewall attached to the node-balancer, an error is returned as its not a supported use case. -// If there's only one firewall attached and it doesn't match what's in the annotation, the new firewall is attached and the old one removed -func (l *loadbalancers) updateFirewallwithID(ctx context.Context, service *v1.Service, nb *linodego.NodeBalancer) error { - var newFirewallID int - var err error - - fwID := service.GetAnnotations()[annLinodeCloudFirewallID] - newFirewallID, err = strconv.Atoi(fwID) - if err != nil { - return err - } - - // See if a firewall is attached to the nodebalancer first. - firewalls, err := l.client.ListNodeBalancerFirewalls(ctx, nb.ID, &linodego.ListOptions{}) - if err != nil { - return err - } - if len(firewalls) > 1 { - klog.Errorf("Found more than one firewall attached to nodebalancer: %d, firewall IDs: %v", nb.ID, firewalls) - return errTooManyFirewalls - } - - // get the ID of the firewall that is already attached to the nodeBalancer, if we have one. - var existingFirewallID int - if len(firewalls) == 1 { - existingFirewallID = firewalls[0].ID - } - - // if existing firewall and new firewall differs, attach the new firewall and remove the old. - if existingFirewallID != newFirewallID { - // attach new firewall. 
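For context on the `annotations.AnnLinodeNodeBalancerID` handling above: a Service can be pinned to a pre-existing NodeBalancer by ID, and `EnsureLoadBalancer` deliberately errors and retries while the annotated NodeBalancer is not yet visible. A hedged sketch of such a Service object; the literal annotation key is an assumption based on the project's `service.beta.kubernetes.io/linode-loadbalancer-` prefix rather than something shown in this hunk:

```go
// Hedged example of pinning a Service to an existing NodeBalancer.
// The annotation key literal is an assumption standing in for the exported
// annotations.AnnLinodeNodeBalancerID constant.
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	svc := &v1.Service{
		ObjectMeta: metav1.ObjectMeta{
			Name: "web",
			Annotations: map[string]string{
				// assumed literal for annotations.AnnLinodeNodeBalancerID
				"service.beta.kubernetes.io/linode-loadbalancer-nodebalancer-id": "12345",
			},
		},
		Spec: v1.ServiceSpec{Type: v1.ServiceTypeLoadBalancer},
	}
	// The CCM resolves this ID before ever trying a label-based lookup.
	fmt.Println(svc.Annotations)
}
```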
- _, err = l.client.CreateFirewallDevice(ctx, newFirewallID, linodego.FirewallDeviceCreateOptions{ - ID: nb.ID, - Type: "nodebalancer", - }) - if err != nil { - return err - } - // remove the existing firewall if it exists - if existingFirewallID != 0 { - deviceID, deviceExists, err := l.getNodeBalancerDeviceID(ctx, existingFirewallID, nb.ID) - if err != nil { - return err - } - - if !deviceExists { - return fmt.Errorf("Error in fetching attached nodeBalancer device") - } - - err = l.client.DeleteFirewallDevice(ctx, existingFirewallID, deviceID) - if err != nil { - return err - } - } - } - return nil -} - -func ipsChanged(ips *linodego.NetworkAddresses, rules []linodego.FirewallRule) bool { - var ruleIPv4s []string - var ruleIPv6s []string - - for _, rule := range rules { - if rule.Addresses.IPv4 != nil { - ruleIPv4s = append(ruleIPv4s, *rule.Addresses.IPv4...) - } - if rule.Addresses.IPv6 != nil { - ruleIPv6s = append(ruleIPv6s, *rule.Addresses.IPv6...) - } - } - - if len(ruleIPv4s) > 0 && ips.IPv4 == nil { - return true - } - - if len(ruleIPv6s) > 0 && ips.IPv6 == nil { - return true - } - - if ips.IPv4 != nil { - for _, ipv4 := range *ips.IPv4 { - if !slices.Contains(ruleIPv4s, ipv4) { - return true - } - } - } - - if ips.IPv6 != nil { - for _, ipv6 := range *ips.IPv6 { - if !slices.Contains(ruleIPv6s, ipv6) { - return true - } - } - } - - return false -} - -func firewallRuleChanged(old linodego.FirewallRuleSet, newACL aclConfig) bool { - var ips *linodego.NetworkAddresses - if newACL.AllowList != nil { - // this is a allowList, this means that the rules should have `DROP` as inboundpolicy - if old.InboundPolicy != "DROP" { - return true - } - if (newACL.AllowList.IPv4 != nil || newACL.AllowList.IPv6 != nil) && len(old.Inbound) == 0 { - return true - } - ips = newACL.AllowList - } - - if newACL.DenyList != nil { - if old.InboundPolicy != "ACCEPT" { - return true - } - - if (newACL.DenyList.IPv4 != nil || newACL.DenyList.IPv6 != nil) && len(old.Inbound) == 0 { - return true - } - ips = newACL.DenyList - } - - return ipsChanged(ips, old.Inbound) -} - -func (l *loadbalancers) updateFWwithACL(ctx context.Context, service *v1.Service, nb *linodego.NodeBalancer) error { - // See if a firewall is attached to the nodebalancer first. - firewalls, err := l.client.ListNodeBalancerFirewalls(ctx, nb.ID, &linodego.ListOptions{}) - if err != nil { - return err - } - - switch len(firewalls) { - case 0: - { - // need to create a fw and attach it to our nb - fwcreateOpts, err := l.createFirewallOptsForSvc(l.GetLoadBalancerName(ctx, "", service), l.getLoadBalancerTags(ctx, "", service), service) - if err != nil { - return err - } - - fw, err := l.client.CreateFirewall(ctx, *fwcreateOpts) - if err != nil { - return err - } - // attach new firewall. - _, err = l.client.CreateFirewallDevice(ctx, fw.ID, linodego.FirewallDeviceCreateOptions{ - ID: nb.ID, - Type: "nodebalancer", - }) - if err != nil { - return err - } - } - case 1: - { - // We do not want to get into the complexity of reconciling differences, might as well just pull what's in the svc annotation now and update the fw. 
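The annotation consumed here is a small JSON document in which exactly one of `allowList` or `denyList` may be set; that invariant is what `errInvalidFWConfig` (now `firewall.ErrInvalidFWConfig`) guards. A self-contained sketch of parsing it, redeclaring the `aclConfig` struct locally since the refactor moves the real one into the `firewall` package:

```go
// Parsing the firewall ACL annotation value. The struct mirrors the
// aclConfig removed from this file; the real type now lives in the
// firewall package.
package main

import (
	"encoding/json"
	"fmt"

	"github.com/linode/linodego"
)

type aclConfig struct {
	AllowList *linodego.NetworkAddresses `json:"allowList"`
	DenyList  *linodego.NetworkAddresses `json:"denyList"`
}

func main() {
	// The same JSON shape the tests later in this diff attach via the
	// firewall ACL annotation.
	raw := `{"allowList": {"ipv4": ["2.2.2.2/32"], "ipv6": ["2001:db8::/128"]}}`

	var acl aclConfig
	if err := json.Unmarshal([]byte(raw), &acl); err != nil {
		panic(err)
	}
	// Exactly one list must be present: both set or neither set is invalid.
	if (acl.AllowList != nil) == (acl.DenyList != nil) {
		panic("specify either an allowList or a denyList, not both or neither")
	}
	fmt.Printf("allow IPv4: %v\n", *acl.AllowList.IPv4)
}
```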
- var acl aclConfig - err := json.Unmarshal([]byte(service.GetAnnotations()[annLinodeCloudFirewallACL]), &acl) - if err != nil { - return err - } - - changed := firewallRuleChanged(firewalls[0].Rules, acl) - if !changed { - return nil - } - - fwCreateOpts, err := l.createFirewallOptsForSvc(service.Name, []string{""}, service) - if err != nil { - return err - } - _, err = l.client.UpdateFirewallRules(ctx, firewalls[0].ID, fwCreateOpts.Rules) - if err != nil { - return err - } - } - default: - klog.Errorf("Found more than one firewall attached to nodebalancer: %d, firewall IDs: %v", nb.ID, firewalls) - return errTooManyFirewalls - } - return nil -} - -// updateNodeBalancerFirewall reconciles the firewall attached to the nodebalancer -// -// This function does the following -// 1. If a firewallID annotation is present, it checks if the nodebalancer has a firewall attached, and if it matches the annotationID -// a. If the IDs match, nothing to do here. -// b. If they don't match, the nb is attached to the new firewall and removed from the old one. -// 2. If a firewallACL annotation is present, -// a. it checks if the nodebalancer has a firewall attached, if a fw exists, it updates rules -// b. if a fw does not exist, it creates one -// 3. If neither of these annotations are present, -// a. AND if no firewalls are attached to the nodebalancer, nothing to do. -// b. if the NB has ONE firewall attached, remove it from nb, and clean up if nothing else is attached to it -// c. If there are more than one fw attached to it, then its a problem, return an err -// 4. If both these annotations are present, the firewallID takes precedence, and the ACL annotation is ignored. -// IF a user creates a fw ID externally, and then switches to using a ACL, the CCM will take over the fw that's attached to the nodebalancer. - -func (l *loadbalancers) updateNodeBalancerFirewall(ctx context.Context, service *v1.Service, nb *linodego.NodeBalancer) error { - // get the new firewall id from the annotation (if any). - _, fwIDExists := service.GetAnnotations()[annLinodeCloudFirewallID] - if fwIDExists { // If an ID exists, we ignore everything else and handle just that - return l.updateFirewallwithID(ctx, service, nb) - } - - // See if a acl exists - _, fwACLExists := service.GetAnnotations()[annLinodeCloudFirewallACL] - if fwACLExists { // if an ACL exists, but no ID, just update the ACL on the fw. - return l.updateFWwithACL(ctx, service, nb) - } - - // No firewall ID or ACL annotation, see if there are firewalls attached to our nb - firewalls, err := l.client.ListNodeBalancerFirewalls(ctx, nb.ID, &linodego.ListOptions{}) - if err != nil { - return err - } - - if len(firewalls) == 0 { - return nil - } - if len(firewalls) > 1 { - klog.Errorf("Found more than one firewall attached to nodebalancer: %d, firewall IDs: %v", nb.ID, firewalls) - return errTooManyFirewalls - } - - err = l.client.DeleteFirewallDevice(ctx, firewalls[0].ID, nb.ID) - if err != nil { - return err - } - // once we delete the device, we should see if there's anything attached to that firewall - devices, err := l.client.ListFirewallDevices(ctx, firewalls[0].ID, &linodego.ListOptions{}) - if err != nil { - return err - } - - if len(devices) == 0 { - // nothing attached to it, clean it up - return l.client.DeleteFirewall(ctx, firewalls[0].ID) - } - // else let that firewall linger, don't mess with it. 
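The removed doc comment above spells out the reconciliation precedence that now lives behind `firewall.UpdateNodeBalancerFirewall`: a firewall ID annotation wins outright, an ACL annotation comes next, and with neither present a single attached firewall is detached and deleted if nothing else uses it. A compact restatement of that decision order; the annotation literals below are assumptions standing in for the exported constants:

```go
// Sketch of the firewall reconciliation precedence over plain annotation
// lookups; this is not the firewall package's API.
package main

import "fmt"

func reconcileFirewall(ann map[string]string) string {
	const (
		annID  = "service.beta.kubernetes.io/linode-loadbalancer-firewall-id"  // assumed literal
		annACL = "service.beta.kubernetes.io/linode-loadbalancer-firewall-acl" // assumed literal
	)
	switch {
	case ann[annID] != "":
		// The ID annotation takes precedence; any ACL annotation is ignored.
		return "attach the referenced firewall; detach the previous one"
	case ann[annACL] != "":
		return "create or update a CCM-managed firewall from the ACL"
	default:
		// No annotation: detach a lone attached firewall and garbage-collect
		// it if no other device still uses it.
		return "detach a single attached firewall and delete it if unused"
	}
}

func main() {
	fmt.Println(reconcileFirewall(map[string]string{
		"service.beta.kubernetes.io/linode-loadbalancer-firewall-id": "123",
	}))
}
```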
-
-	return nil
-}
-
 //nolint:funlen
-func (l *loadbalancers) updateNodeBalancer(ctx context.Context, clusterName string, service *v1.Service, nodes []*v1.Node, nb *linodego.NodeBalancer) (err error) {
+func (l *loadbalancers) updateNodeBalancer(
+	ctx context.Context,
+	clusterName string,
+	service *v1.Service,
+	nodes []*v1.Node,
+	nb *linodego.NodeBalancer,
+) (err error) {
 	if len(nodes) == 0 {
 		return fmt.Errorf("%w: service %s", errNoNodesAvailable, getServiceNn(service))
 	}
@@ -520,7 +311,7 @@ func (l *loadbalancers) updateNodeBalancer(ctx context.Context, clusterName stri
 		}
 	}
 
-	tags := l.getLoadBalancerTags(ctx, clusterName, service)
+	tags := l.GetLoadBalancerTags(ctx, clusterName, service)
 	if !reflect.DeepEqual(nb.Tags, tags) {
 		update := nb.GetUpdateOptions()
 		update.Tags = &tags
@@ -531,7 +322,8 @@ func (l *loadbalancers) updateNodeBalancer(ctx context.Context, clusterName stri
 		}
 	}
 
-	err = l.updateNodeBalancerFirewall(ctx, service, nb)
+	fwClient := firewall.LinodeClient{Client: l.client}
+	err = fwClient.UpdateNodeBalancerFirewall(ctx, l.GetLoadBalancerName(ctx, clusterName, service), tags, service, nb)
 	if err != nil {
 		return err
 	}
@@ -564,12 +356,6 @@ func (l *loadbalancers) updateNodeBalancer(ctx context.Context, clusterName stri
 		return err
 	}
 
-	// Add all of the Nodes to the config
-	var newNBNodes []linodego.NodeBalancerNodeCreateOptions
-	for _, node := range nodes {
-		newNBNodes = append(newNBNodes, l.buildNodeBalancerNodeCreateOptions(node, port.NodePort))
-	}
-
 	// Look for an existing config for this port
 	var currentNBCfg *linodego.NodeBalancerConfig
 	for i := range nbCfgs {
@@ -579,6 +365,34 @@ func (l *loadbalancers) updateNodeBalancer(ctx context.Context, clusterName stri
 			break
 		}
 	}
+	oldNBNodeIDs := make(map[string]int)
+	if currentNBCfg != nil {
+		// Obtain the list of current NB nodes and convert it to a map of node IDs
+		currentNBNodes, err := l.client.ListNodeBalancerNodes(ctx, nb.ID, currentNBCfg.ID, nil)
+		if err != nil {
+			// This error can be ignored: if we fail to get the nodes, we can still rebuild the config from scratch.
+			// That would just cause the NB to reload the config even if the node list did not change, so we prefer to send IDs when possible.
+			klog.Warningf("Unable to list existing nodebalancer nodes for NB %d config %d, error: %s", nb.ID, currentNBCfg.ID, err)
+		}
+		for _, node := range currentNBNodes {
+			oldNBNodeIDs[node.Address] = node.ID
+		}
+		klog.Infof("Nodebalancer %d had nodes %v", nb.ID, oldNBNodeIDs)
+	} else {
+		klog.Infof("No preexisting NodeBalancer config for port %v found.", port.Port)
+	}
+	// Add all of the Nodes to the config
+	newNBNodes := make([]linodego.NodeBalancerConfigRebuildNodeOptions, 0, len(nodes))
+	for _, node := range nodes {
+		newNodeOpts := l.buildNodeBalancerNodeConfigRebuildOptions(node, port.NodePort)
+		oldNodeID, ok := oldNBNodeIDs[newNodeOpts.Address]
+		if ok {
+			newNodeOpts.ID = oldNodeID
+		} else {
+			klog.Infof("No preexisting node ID for %v found.", newNodeOpts.Address)
+		}
+		newNBNodes = append(newNBNodes, newNodeOpts)
+	}
 
 	// If there's no existing config, create it
 	var rebuildOpts linodego.NodeBalancerConfigRebuildOptions
@@ -617,6 +431,25 @@ func (l *loadbalancers) UpdateLoadBalancer(ctx context.Context, clusterName stri
 	sentry.SetTag(ctx, "cluster_name", clusterName)
 	sentry.SetTag(ctx, "service", service.Name)
 
+	// handle LoadBalancers backed by Cilium
+	if l.loadBalancerType == ciliumLBType {
+		klog.Infof("handling update for LoadBalancer Service %s/%s as %s", service.Namespace, service.Name, ciliumLBClass)
+		serviceNn := getServiceNn(service)
+		var ipHolderSuffix string
+		if Options.IpHolderSuffix != "" {
+			ipHolderSuffix = Options.IpHolderSuffix
+			klog.V(3).Infof("using parameter-based IP Holder suffix %s for Service %s", ipHolderSuffix, serviceNn)
+		}
+
+		// make sure that IPs are shared properly on the Node if using load-balancers not backed by NodeBalancers
+		for _, node := range nodes {
+			if err := l.handleIPSharing(ctx, node, ipHolderSuffix); err != nil {
+				return err
+			}
+		}
+		return nil
+	}
+
 	// UpdateLoadBalancer is invoked with a nil LoadBalancerStatus; we must fetch the latest
 	// status for NodeBalancer discovery.
 	serviceWithStatus := service.DeepCopy()
@@ -663,7 +496,7 @@ func (l *loadbalancers) deleteUnusedConfigs(ctx context.Context, nbConfigs []lin
 // shouldPreserveNodeBalancer determines whether a NodeBalancer should be deleted based on the
 // service's preserve annotation.
 func (l *loadbalancers) shouldPreserveNodeBalancer(service *v1.Service) bool {
-	return getServiceBoolAnnotation(service, annLinodeLoadBalancerPreserve)
+	return getServiceBoolAnnotation(service, annotations.AnnLinodeLoadBalancerPreserve)
 }
 
 // EnsureLoadBalancerDeleted deletes the specified loadbalancer if it exists.
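The ID plumbing added above is the heart of this hunk: when the rebuild request carries the IDs of backends that already exist on the config, the API can keep unchanged backends instead of deleting and recreating them all. A self-contained sketch of the same map-then-reuse step built on the `linodego` rebuild types used in the diff:

```go
// Sketch of node-ID reuse during a NodeBalancer config rebuild.
package main

import (
	"fmt"

	"github.com/linode/linodego"
)

func main() {
	// IDs of backends already on the config, keyed by "address:port",
	// as ListNodeBalancerNodes would report them.
	oldNBNodeIDs := map[string]int{"192.168.0.10:30000": 555}

	addrs := []string{"192.168.0.10:30000", "192.168.0.11:30000"}
	newNBNodes := make([]linodego.NodeBalancerConfigRebuildNodeOptions, 0, len(addrs))
	for _, addr := range addrs {
		opts := linodego.NodeBalancerConfigRebuildNodeOptions{
			NodeBalancerNodeCreateOptions: linodego.NodeBalancerNodeCreateOptions{
				Address: addr,
				Label:   "node",
				Mode:    "accept",
				Weight:  100,
			},
		}
		if id, ok := oldNBNodeIDs[addr]; ok {
			// A preserved backend keeps its identity across the rebuild.
			opts.ID = id
		}
		newNBNodes = append(newNBNodes, opts)
	}
	fmt.Printf("%+v\n", newNBNodes)
}
```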
@@ -676,6 +509,23 @@ func (l *loadbalancers) EnsureLoadBalancerDeleted(ctx context.Context, clusterNa sentry.SetTag(ctx, "cluster_name", clusterName) sentry.SetTag(ctx, "service", service.Name) + // Handle LoadBalancers backed by Cilium + if l.loadBalancerType == ciliumLBType { + klog.Infof("handling LoadBalancer Service %s/%s as %s", service.Namespace, service.Name, ciliumLBClass) + if err := l.deleteSharedIP(ctx, service); err != nil { + return err + } + // delete CiliumLoadBalancerIPPool for service + if err := l.deleteCiliumLBIPPool(ctx, service); err != nil && !k8serrors.IsNotFound(err) { + klog.Infof("Failed to delete CiliumLoadBalancerIPPool") + return err + } + + return nil + } + + // Handle LoadBalancers backed by NodeBalancers + serviceNn := getServiceNn(service) if len(service.Status.LoadBalancer.Ingress) == 0 { @@ -699,10 +549,20 @@ func (l *loadbalancers) EnsureLoadBalancerDeleted(ctx context.Context, clusterNa } if l.shouldPreserveNodeBalancer(service) { - klog.Infof("short-circuiting deletion of NodeBalancer (%d) for service (%s) as annotated with %s", nb.ID, serviceNn, annLinodeLoadBalancerPreserve) + klog.Infof( + "short-circuiting deletion of NodeBalancer (%d) for service (%s) as annotated with %s", + nb.ID, + serviceNn, + annotations.AnnLinodeLoadBalancerPreserve, + ) return nil } + fwClient := firewall.LinodeClient{Client: l.client} + if err = fwClient.DeleteNodeBalancerFirewall(ctx, service, nb); err != nil { + return err + } + if err = l.client.DeleteNodeBalancer(ctx, nb.ID); err != nil { klog.Errorf("failed to delete NodeBalancer (%d) for service (%s): %s", nb.ID, serviceNn, err) sentry.CaptureError(ctx, err) @@ -751,13 +611,15 @@ func (l *loadbalancers) getNodeBalancerByID(ctx context.Context, service *v1.Ser return nb, nil } -func (l *loadbalancers) getLoadBalancerTags(_ context.Context, clusterName string, service *v1.Service) []string { +func (l *loadbalancers) GetLoadBalancerTags(_ context.Context, clusterName string, service *v1.Service) []string { tags := []string{} if clusterName != "" { tags = append(tags, clusterName) } - tagStr, ok := service.GetAnnotations()[annLinodeLoadBalancerTags] + tags = append(tags, Options.NodeBalancerTags...) + + tagStr, ok := service.GetAnnotations()[annotations.AnnLinodeLoadBalancerTags] if ok { return append(tags, strings.Split(tagStr, ",")...) } @@ -765,77 +627,11 @@ func (l *loadbalancers) getLoadBalancerTags(_ context.Context, clusterName strin return tags } -// processACL takes the IPs, aclType, label etc and formats them into the passed linodego.FirewallCreateOptions pointer. -func processACL(fwcreateOpts *linodego.FirewallCreateOptions, aclType, label, svcName, ports string, ips linodego.NetworkAddresses) { - ruleLabel := fmt.Sprintf("%s-%s", aclType, svcName) - if len(ruleLabel) > maxFirewallRuleLabelLen { - newLabel := ruleLabel[0:maxFirewallRuleLabelLen] - klog.Infof("Firewall label '%s' is too long. Stripping to '%s'", ruleLabel, newLabel) - ruleLabel = newLabel - } - - fwcreateOpts.Rules.Inbound = append(fwcreateOpts.Rules.Inbound, linodego.FirewallRule{ - Action: aclType, - Label: ruleLabel, - Description: fmt.Sprintf("Created by linode-ccm: %s, for %s", label, svcName), - Protocol: linodego.TCP, // Nodebalancers support only TCP. - Ports: ports, - Addresses: ips, - }) - fwcreateOpts.Rules.OutboundPolicy = "ACCEPT" - if aclType == "ACCEPT" { - // if an allowlist is present, we drop everything else. - fwcreateOpts.Rules.InboundPolicy = "DROP" - } else { - // if a denylist is present, we accept everything else. 
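`GetLoadBalancerTags` now merges three sources in a fixed order: the cluster name, the new global `Options.NodeBalancerTags`, and the per-Service tags annotation. A dependency-free sketch of that merge; the sample output mirrors the `expectedTags` assertion in the new global-tags test later in this diff:

```go
// Sketch of the tag merge order introduced above.
package main

import (
	"fmt"
	"strings"
)

func mergeTags(clusterName string, globalTags []string, annotationValue string) []string {
	tags := []string{}
	if clusterName != "" {
		tags = append(tags, clusterName) // cluster name first
	}
	tags = append(tags, globalTags...) // then the global Options.NodeBalancerTags
	if annotationValue != "" {
		// then the comma-separated per-Service annotation
		tags = append(tags, strings.Split(annotationValue, ",")...)
	}
	return tags
}

func main() {
	// Mirrors the expectedTags assertion in the new global-tags test:
	// [linodelb foobar fake test yolo]
	fmt.Println(mergeTags("linodelb", []string{"foobar"}, "fake,test,yolo"))
}
```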
- fwcreateOpts.Rules.InboundPolicy = "ACCEPT" - } -} - -type aclConfig struct { - AllowList *linodego.NetworkAddresses `json:"allowList"` - DenyList *linodego.NetworkAddresses `json:"denyList"` -} - -func (l *loadbalancers) createFirewallOptsForSvc(label string, tags []string, svc *v1.Service) (*linodego.FirewallCreateOptions, error) { - // Fetch acl from annotation - aclString := svc.GetAnnotations()[annLinodeCloudFirewallACL] - fwcreateOpts := linodego.FirewallCreateOptions{ - Label: label, - Tags: tags, - } - servicePorts := make([]string, 0, len(svc.Spec.Ports)) - for _, port := range svc.Spec.Ports { - servicePorts = append(servicePorts, strconv.Itoa(int(port.Port))) - } - - portsString := strings.Join(servicePorts[:], ",") - var acl aclConfig - err := json.Unmarshal([]byte(aclString), &acl) - if err != nil { - return nil, err - } - // it is a problem if both are set, or if both are not set - if (acl.AllowList != nil && acl.DenyList != nil) || (acl.AllowList == nil && acl.DenyList == nil) { - return nil, errInvalidFWConfig - } - - aclType := "ACCEPT" - allowedIPs := acl.AllowList - if acl.DenyList != nil { - aclType = "DROP" - allowedIPs = acl.DenyList - } - - processACL(&fwcreateOpts, aclType, label, svc.Name, portsString, *allowedIPs) - return &fwcreateOpts, nil -} - func (l *loadbalancers) createNodeBalancer(ctx context.Context, clusterName string, service *v1.Service, configs []*linodego.NodeBalancerConfigCreateOptions) (lb *linodego.NodeBalancer, err error) { connThrottle := getConnectionThrottle(service) label := l.GetLoadBalancerName(ctx, clusterName, service) - tags := l.getLoadBalancerTags(ctx, clusterName, service) + tags := l.GetLoadBalancerTags(ctx, clusterName, service) createOpts := linodego.NodeBalancerCreateOptions{ Label: &label, Region: l.zone, @@ -844,7 +640,7 @@ func (l *loadbalancers) createNodeBalancer(ctx context.Context, clusterName stri Tags: tags, } - fwid, ok := service.GetAnnotations()[annLinodeCloudFirewallID] + fwid, ok := service.GetAnnotations()[annotations.AnnLinodeCloudFirewallID] if ok { firewallID, err := strconv.Atoi(fwid) if err != nil { @@ -853,18 +649,18 @@ func (l *loadbalancers) createNodeBalancer(ctx context.Context, clusterName stri createOpts.FirewallID = firewallID } else { // There's no firewallID already set, see if we need to create a new fw, look for the acl annotation. 
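That comment marks the creation-time counterpart of the update precedence: a parseable firewall ID annotation is wired straight into the NodeBalancer create options, otherwise an ACL annotation causes a firewall to be created first and its ID used. A sketch with a hypothetical `createFirewallFromACL` helper standing in for `firewall.CreateFirewallOptsForSvc` plus the `CreateFirewall` call, and deliberately shortened annotation keys:

```go
// Sketch of firewall selection at NodeBalancer creation time.
package main

import (
	"fmt"
	"strconv"

	"github.com/linode/linodego"
)

// createFirewallFromACL is a hypothetical stand-in that would build
// firewall create options from the ACL JSON and call the API.
func createFirewallFromACL(acl string) (int, error) { return 456, nil }

func firewallIDForCreate(ann map[string]string) (int, error) {
	if raw, ok := ann["firewall-id"]; ok { // shortened key for illustration
		return strconv.Atoi(raw) // the ID annotation wins
	}
	if acl, ok := ann["firewall-acl"]; ok { // shortened key for illustration
		return createFirewallFromACL(acl) // otherwise create one from the ACL
	}
	return 0, nil // no firewall requested
}

func main() {
	id, err := firewallIDForCreate(map[string]string{"firewall-id": "123"})
	if err != nil {
		panic(err)
	}
	label := "my-nb"
	createOpts := linodego.NodeBalancerCreateOptions{Label: &label, Region: "us-west"}
	if id != 0 {
		createOpts.FirewallID = id
	}
	fmt.Printf("%+v\n", createOpts)
}
```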
- _, ok := service.GetAnnotations()[annLinodeCloudFirewallACL] + _, ok := service.GetAnnotations()[annotations.AnnLinodeCloudFirewallACL] if ok { - fwcreateOpts, err := l.createFirewallOptsForSvc(label, tags, service) + fwcreateOpts, err := firewall.CreateFirewallOptsForSvc(label, tags, service) if err != nil { return nil, err } - firewall, err := l.client.CreateFirewall(ctx, *fwcreateOpts) + fw, err := l.client.CreateFirewall(ctx, *fwcreateOpts) if err != nil { return nil, err } - createOpts.FirewallID = firewall.ID + createOpts.FirewallID = fw.ID } // no need to deal with firewalls, continue creating nb's } @@ -872,14 +668,6 @@ func (l *loadbalancers) createNodeBalancer(ctx context.Context, clusterName stri return l.client.CreateNodeBalancer(ctx, createOpts) } -func (l *loadbalancers) createFirewall(ctx context.Context, opts linodego.FirewallCreateOptions) (fw *linodego.Firewall, err error) { - return l.client.CreateFirewall(ctx, opts) -} - -func (l *loadbalancers) deleteFirewall(ctx context.Context, firewall *linodego.Firewall) error { - return l.client.DeleteFirewall(ctx, firewall.ID) -} - //nolint:funlen func (l *loadbalancers) buildNodeBalancerConfig(ctx context.Context, service *v1.Service, port int) (linodego.NodeBalancerConfig, error) { portConfig, err := getPortConfig(service, port) @@ -900,7 +688,7 @@ func (l *loadbalancers) buildNodeBalancerConfig(ctx context.Context, service *v1 } if health == linodego.CheckHTTP || health == linodego.CheckHTTPBody { - path := service.GetAnnotations()[annLinodeCheckPath] + path := service.GetAnnotations()[annotations.AnnLinodeCheckPath] if path == "" { path = "/" } @@ -908,14 +696,14 @@ func (l *loadbalancers) buildNodeBalancerConfig(ctx context.Context, service *v1 } if health == linodego.CheckHTTPBody { - body := service.GetAnnotations()[annLinodeCheckBody] + body := service.GetAnnotations()[annotations.AnnLinodeCheckBody] if body == "" { - return config, fmt.Errorf("for health check type http_body need body regex annotation %v", annLinodeCheckBody) + return config, fmt.Errorf("for health check type http_body need body regex annotation %v", annotations.AnnLinodeCheckBody) } config.CheckBody = body } checkInterval := 5 - if ci, ok := service.GetAnnotations()[annLinodeHealthCheckInterval]; ok { + if ci, ok := service.GetAnnotations()[annotations.AnnLinodeHealthCheckInterval]; ok { if checkInterval, err = strconv.Atoi(ci); err != nil { return config, err } @@ -923,7 +711,7 @@ func (l *loadbalancers) buildNodeBalancerConfig(ctx context.Context, service *v1 config.CheckInterval = checkInterval checkTimeout := 3 - if ct, ok := service.GetAnnotations()[annLinodeHealthCheckTimeout]; ok { + if ct, ok := service.GetAnnotations()[annotations.AnnLinodeHealthCheckTimeout]; ok { if checkTimeout, err = strconv.Atoi(ct); err != nil { return config, err } @@ -931,7 +719,7 @@ func (l *loadbalancers) buildNodeBalancerConfig(ctx context.Context, service *v1 config.CheckTimeout = checkTimeout checkAttempts := 2 - if ca, ok := service.GetAnnotations()[annLinodeHealthCheckAttempts]; ok { + if ca, ok := service.GetAnnotations()[annotations.AnnLinodeHealthCheckAttempts]; ok { if checkAttempts, err = strconv.Atoi(ca); err != nil { return config, err } @@ -939,7 +727,7 @@ func (l *loadbalancers) buildNodeBalancerConfig(ctx context.Context, service *v1 config.CheckAttempts = checkAttempts checkPassive := true - if cp, ok := service.GetAnnotations()[annLinodeHealthCheckPassive]; ok { + if cp, ok := service.GetAnnotations()[annotations.AnnLinodeHealthCheckPassive]; ok { 
if checkPassive, err = strconv.ParseBool(cp); err != nil { return config, err } @@ -989,7 +777,7 @@ func (l *loadbalancers) buildLoadBalancerRequest(ctx context.Context, clusterNam createOpt := config.GetCreateOptions() for _, n := range nodes { - createOpt.Nodes = append(createOpt.Nodes, l.buildNodeBalancerNodeCreateOptions(n, port.NodePort)) + createOpt.Nodes = append(createOpt.Nodes, l.buildNodeBalancerNodeConfigRebuildOptions(n, port.NodePort).NodeBalancerNodeCreateOptions) } configs = append(configs, &createOpt) @@ -1009,14 +797,16 @@ func coerceString(s string, minLen, maxLen int, padding string) string { return s } -func (l *loadbalancers) buildNodeBalancerNodeCreateOptions(node *v1.Node, nodePort int32) linodego.NodeBalancerNodeCreateOptions { - return linodego.NodeBalancerNodeCreateOptions{ - Address: fmt.Sprintf("%v:%v", getNodePrivateIP(node), nodePort), - // NodeBalancer backends must be 3-32 chars in length - // If < 3 chars, pad node name with "node-" prefix - Label: coerceString(node.Name, 3, 32, "node-"), - Mode: "accept", - Weight: 100, +func (l *loadbalancers) buildNodeBalancerNodeConfigRebuildOptions(node *v1.Node, nodePort int32) linodego.NodeBalancerConfigRebuildNodeOptions { + return linodego.NodeBalancerConfigRebuildNodeOptions{ + NodeBalancerNodeCreateOptions: linodego.NodeBalancerNodeCreateOptions{ + Address: fmt.Sprintf("%v:%v", getNodePrivateIP(node), nodePort), + // NodeBalancer backends must be 3-32 chars in length + // If < 3 chars, pad node name with "node-" prefix + Label: coerceString(node.Name, 3, 32, "node-"), + Mode: "accept", + Weight: 100, + }, } } @@ -1060,7 +850,7 @@ func getPortConfig(service *v1.Service, port int) (portConfig, error) { protocol := portConfigAnnotation.Protocol if protocol == "" { protocol = "tcp" - if p, ok := service.GetAnnotations()[annLinodeDefaultProtocol]; ok { + if p, ok := service.GetAnnotations()[annotations.AnnLinodeDefaultProtocol]; ok { protocol = p } } @@ -1069,7 +859,7 @@ func getPortConfig(service *v1.Service, port int) (portConfig, error) { proxyProtocol := portConfigAnnotation.ProxyProtocol if proxyProtocol == "" { proxyProtocol = string(linodego.ProxyProtocolNone) - for _, ann := range []string{annLinodeDefaultProxyProtocol, annLinodeProxyProtocolDeprecated} { + for _, ann := range []string{annotations.AnnLinodeDefaultProxyProtocol, annLinodeProxyProtocolDeprecated} { if pp, ok := service.GetAnnotations()[ann]; ok { proxyProtocol = pp break @@ -1097,19 +887,19 @@ func getPortConfig(service *v1.Service, port int) (portConfig, error) { } func getHealthCheckType(service *v1.Service) (linodego.ConfigCheck, error) { - hType, ok := service.GetAnnotations()[annLinodeHealthCheckType] + hType, ok := service.GetAnnotations()[annotations.AnnLinodeHealthCheckType] if !ok { return linodego.CheckConnection, nil } if hType != "none" && hType != "connection" && hType != "http" && hType != "http_body" { - return "", fmt.Errorf("invalid health check type: %q specified in annotation: %q", hType, annLinodeHealthCheckType) + return "", fmt.Errorf("invalid health check type: %q specified in annotation: %q", hType, annotations.AnnLinodeHealthCheckType) } return linodego.ConfigCheck(hType), nil } func getPortConfigAnnotation(service *v1.Service, port int) (portConfigAnnotation, error) { annotation := portConfigAnnotation{} - annotationKey := annLinodePortConfigPrefix + strconv.Itoa(port) + annotationKey := annotations.AnnLinodePortConfigPrefix + strconv.Itoa(port) annotationJSON, ok := service.GetAnnotations()[annotationKey] if !ok { @@ 
-1129,10 +919,11 @@ func getPortConfigAnnotation(service *v1.Service, port int) (portConfigAnnotatio // network, this will not be the NodeInternalIP, so this prefers an annotation // cluster operators may specify in such a situation. func getNodePrivateIP(node *v1.Node) string { - if address, exists := node.Annotations[annLinodeNodePrivateIP]; exists { + if address, exists := node.Annotations[annotations.AnnLinodeNodePrivateIP]; exists { return address } + klog.Infof("Node %s, assigned IP addresses: %v", node.Name, node.Status.Addresses) for _, addr := range node.Status.Addresses { if addr.Type == v1.NodeInternalIP { return addr.Address @@ -1162,9 +953,9 @@ func getTLSCertInfo(ctx context.Context, kubeClient kubernetes.Interface, namesp } func getConnectionThrottle(service *v1.Service) int { - connThrottle := 20 + connThrottle := 0 // disable throttle if nothing is specified - if connThrottleString := service.GetAnnotations()[annLinodeThrottle]; connThrottleString != "" { + if connThrottleString := service.GetAnnotations()[annotations.AnnLinodeThrottle]; connThrottleString != "" { parsed, err := strconv.Atoi(connThrottleString) if err == nil { if parsed < 0 { @@ -1185,7 +976,7 @@ func makeLoadBalancerStatus(service *v1.Service, nb *linodego.NodeBalancer) *v1. ingress := v1.LoadBalancerIngress{ Hostname: *nb.Hostname, } - if !getServiceBoolAnnotation(service, annLinodeHostnameOnlyIngress) { + if !getServiceBoolAnnotation(service, annotations.AnnLinodeHostnameOnlyIngress) { if val := envBoolOptions("LINODE_HOSTNAME_ONLY_INGRESS"); val { klog.Infof("LINODE_HOSTNAME_ONLY_INGRESS: (%v)", val) } else { diff --git a/cloud/linode/loadbalancers_test.go b/cloud/linode/loadbalancers_test.go index 2747f85a..6657b35f 100644 --- a/cloud/linode/loadbalancers_test.go +++ b/cloud/linode/loadbalancers_test.go @@ -2,12 +2,17 @@ package linode import ( "context" + cryptoRand "crypto/rand" + "encoding/hex" + "encoding/json" stderrors "errors" "fmt" + "math/rand" "net/http" "net/http/httptest" "os" "reflect" + "regexp" "strconv" "strings" "testing" @@ -20,6 +25,9 @@ import ( "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/kubernetes" "k8s.io/client-go/kubernetes/fake" + + "github.com/linode/linode-cloud-controller-manager/cloud/annotations" + "github.com/linode/linode-cloud-controller-manager/cloud/linode/firewall" ) const testCert string = `-----BEGIN CERTIFICATE----- @@ -142,6 +150,14 @@ func TestCCMLoadBalancers(t *testing.T) { name: "Create Load Balancer With Invalid Firewall ACL - NO Allow Or Deny", f: testCreateNodeBalanceWithNoAllowOrDenyList, }, + { + name: "Create Load Balancer With Global Tags set", + f: testCreateNodeBalancerWithGlobalTags, + }, + { + name: "Update Load Balancer - Add Node", + f: testUpdateLoadBalancerAddNode, + }, { name: "Update Load Balancer - Add Annotation", f: testUpdateLoadBalancerAddAnnotation, @@ -176,7 +192,11 @@ func TestCCMLoadBalancers(t *testing.T) { }, { name: "Update Load Balancer - Delete Firewall ID", - f: testUpdateLoadBalancerDeleteFirewall, + f: testUpdateLoadBalancerDeleteFirewallRemoveID, + }, + { + name: "Update Load Balancer - Delete Firewall ACL", + f: testUpdateLoadBalancerDeleteFirewallRemoveACL, }, { name: "Update Load Balancer - Update Firewall ACL", @@ -258,14 +278,14 @@ func stubService(fake *fake.Clientset, service *v1.Service) { _, _ = fake.CoreV1().Services("").Create(context.TODO(), service, metav1.CreateOptions{}) } -func testCreateNodeBalancer(t *testing.T, client *linodego.Client, _ *fakeAPI, annotations map[string]string) error { +func 
testCreateNodeBalancer(t *testing.T, client *linodego.Client, _ *fakeAPI, annMap map[string]string, expectedTags []string) error { svc := &v1.Service{ ObjectMeta: metav1.ObjectMeta{ Name: randString(), UID: "foobar123", Annotations: map[string]string{ - annLinodeThrottle: "15", - annLinodeLoadBalancerTags: "fake,test,yolo", + annotations.AnnLinodeThrottle: "15", + annotations.AnnLinodeLoadBalancerTags: "fake,test,yolo", }, }, Spec: v1.ServiceSpec{ @@ -285,10 +305,10 @@ func testCreateNodeBalancer(t *testing.T, client *linodego.Client, _ *fakeAPI, a }, }, } - for key, value := range annotations { + for key, value := range annMap { svc.Annotations[key] = value } - lb := &loadbalancers{client, "us-west", nil} + lb := newLoadbalancers(client, "us-west").(*loadbalancers) nodes := []*v1.Node{ {ObjectMeta: metav1.ObjectMeta{Name: "node-1"}}, } @@ -325,14 +345,16 @@ func testCreateNodeBalancer(t *testing.T, client *linodego.Client, _ *fakeAPI, a t.Logf("actual: %v", nb.ClientConnThrottle) } - expectedTags := []string{"linodelb", "fake", "test", "yolo"} + if len(expectedTags) == 0 { + expectedTags = []string{"linodelb", "fake", "test", "yolo"} + } if !reflect.DeepEqual(nb.Tags, expectedTags) { t.Error("unexpected Tags") t.Logf("expected: %v", expectedTags) t.Logf("actual: %v", nb.Tags) } - _, ok := annotations[annLinodeCloudFirewallACL] + _, ok := annMap[annotations.AnnLinodeCloudFirewallACL] if ok { // a firewall was configured for this firewalls, err := client.ListNodeBalancerFirewalls(context.TODO(), nb.ID, &linodego.ListOptions{}) @@ -350,7 +372,7 @@ func testCreateNodeBalancer(t *testing.T, client *linodego.Client, _ *fakeAPI, a } func testCreateNodeBalancerWithOutFirewall(t *testing.T, client *linodego.Client, f *fakeAPI) { - err := testCreateNodeBalancer(t, client, f, nil) + err := testCreateNodeBalancer(t, client, f, nil, nil) if err != nil { t.Fatalf("expected a nil error, got %v", err) } @@ -358,68 +380,72 @@ func testCreateNodeBalancerWithOutFirewall(t *testing.T, client *linodego.Client func testCreateNodeBalanceWithNoAllowOrDenyList(t *testing.T, client *linodego.Client, f *fakeAPI) { annotations := map[string]string{ - annLinodeCloudFirewallACL: `{}`, + annotations.AnnLinodeCloudFirewallACL: `{}`, } - err := testCreateNodeBalancer(t, client, f, annotations) - if err == nil || !stderrors.Is(err, errInvalidFWConfig) { - t.Fatalf("expected a %v error, got %v", errInvalidFWConfig, err) + err := testCreateNodeBalancer(t, client, f, annotations, nil) + if err == nil || !stderrors.Is(err, firewall.ErrInvalidFWConfig) { + t.Fatalf("expected a %v error, got %v", firewall.ErrInvalidFWConfig, err) } } func testCreateNodeBalanceWithBothAllowOrDenyList(t *testing.T, client *linodego.Client, f *fakeAPI) { annotations := map[string]string{ - annLinodeCloudFirewallACL: `{ + annotations.AnnLinodeCloudFirewallACL: `{ "allowList": { - "ipv4": ["2.2.2.2"] + "ipv4": ["2.2.2.2/32"], + "ipv6": ["2001:db8::/128"] }, "denyList": { - "ipv4": ["2.2.2.2"] + "ipv4": ["2.2.2.2/32"], + "ipv6": ["2001:db8::/128"] } }`, } - err := testCreateNodeBalancer(t, client, f, annotations) - if err == nil || !stderrors.Is(err, errInvalidFWConfig) { - t.Fatalf("expected a %v error, got %v", errInvalidFWConfig, err) + err := testCreateNodeBalancer(t, client, f, annotations, nil) + if err == nil || !stderrors.Is(err, firewall.ErrInvalidFWConfig) { + t.Fatalf("expected a %v error, got %v", firewall.ErrInvalidFWConfig, err) } } func testCreateNodeBalancerWithAllowList(t *testing.T, client *linodego.Client, f *fakeAPI) { annotations 
:= map[string]string{ - annLinodeCloudFirewallACL: `{ + annotations.AnnLinodeCloudFirewallACL: `{ "allowList": { - "ipv4": ["2.2.2.2"] + "ipv4": ["2.2.2.2/32"], + "ipv6": ["2001:db8::/128"] } }`, } - err := testCreateNodeBalancer(t, client, f, annotations) + err := testCreateNodeBalancer(t, client, f, annotations, nil) if err != nil { - t.Fatalf("expected a non-nil error, got %v", err) + t.Fatalf("expected a nil error, got %v", err) } } func testCreateNodeBalancerWithDenyList(t *testing.T, client *linodego.Client, f *fakeAPI) { annotations := map[string]string{ - annLinodeCloudFirewallACL: `{ + annotations.AnnLinodeCloudFirewallACL: `{ "denyList": { - "ipv4": ["2.2.2.2"] + "ipv4": ["2.2.2.2/32"], + "ipv6": ["2001:db8::/128"] } }`, } - err := testCreateNodeBalancer(t, client, f, annotations) + err := testCreateNodeBalancer(t, client, f, annotations, nil) if err != nil { - t.Fatalf("expected a non-nil error, got %v", err) + t.Fatalf("expected a nil error, got %v", err) } } func testCreateNodeBalancerWithFirewall(t *testing.T, client *linodego.Client, f *fakeAPI) { annotations := map[string]string{ - annLinodeCloudFirewallID: "123", + annotations.AnnLinodeCloudFirewallID: "123", } - err := testCreateNodeBalancer(t, client, f, annotations) + err := testCreateNodeBalancer(t, client, f, annotations, nil) if err != nil { t.Fatalf("expected a nil error, got %v", err) } @@ -427,22 +453,224 @@ func testCreateNodeBalancerWithFirewall(t *testing.T, client *linodego.Client, f func testCreateNodeBalancerWithInvalidFirewall(t *testing.T, client *linodego.Client, f *fakeAPI) { annotations := map[string]string{ - annLinodeCloudFirewallID: "qwerty", + annotations.AnnLinodeCloudFirewallID: "qwerty", } expectedError := "strconv.Atoi: parsing \"qwerty\": invalid syntax" - err := testCreateNodeBalancer(t, client, f, annotations) + err := testCreateNodeBalancer(t, client, f, annotations, nil) if err.Error() != expectedError { t.Fatalf("expected a %s error, got %v", expectedError, err) } } +func testCreateNodeBalancerWithGlobalTags(t *testing.T, client *linodego.Client, f *fakeAPI) { + original := Options.NodeBalancerTags + defer func() { + Options.NodeBalancerTags = original + }() + Options.NodeBalancerTags = []string{"foobar"} + expectedTags := []string{"linodelb", "foobar", "fake", "test", "yolo"} + err := testCreateNodeBalancer(t, client, f, nil, expectedTags) + if err != nil { + t.Fatalf("expected a nil error, got %v", err) + } +} + +func testUpdateLoadBalancerAddNode(t *testing.T, client *linodego.Client, f *fakeAPI) { + svc := &v1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: randString(), + UID: "foobar1234", + }, + Spec: v1.ServiceSpec{ + Ports: []v1.ServicePort{ + { + Name: randString(), + Protocol: "TCP", + Port: int32(80), + NodePort: int32(30000), + }, + }, + }, + } + + nodes1 := []*v1.Node{ + { + Status: v1.NodeStatus{ + Addresses: []v1.NodeAddress{ + { + Type: v1.NodeInternalIP, + Address: "127.0.0.1", + }, + }, + }, + }, + } + + nodes2 := []*v1.Node{ + { + Status: v1.NodeStatus{ + Addresses: []v1.NodeAddress{ + { + Type: v1.NodeInternalIP, + Address: "127.0.0.1", + }, + }, + }, + }, + { + Status: v1.NodeStatus{ + Addresses: []v1.NodeAddress{ + { + Type: v1.NodeInternalIP, + Address: "127.0.0.2", + }, + }, + }, + }, + { + Status: v1.NodeStatus{ + Addresses: []v1.NodeAddress{ + { + Type: v1.NodeInternalIP, + Address: "127.0.0.3", + }, + }, + }, + }, + } + + lb := newLoadbalancers(client, "us-west").(*loadbalancers) + fakeClientset := fake.NewSimpleClientset() + lb.kubeClient = fakeClientset + + 
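One pattern repeats throughout these test updates: every `&loadbalancers{client, "us-west", nil}` literal becomes `newLoadbalancers(client, "us-west").(*loadbalancers)`, so the tests stop breaking whenever the struct grows a field, while the type assertion still exposes unexported fields for test setup. The idiom in miniature:

```go
// Minimal illustration of constructor-plus-assertion replacing a
// positional struct literal in tests.
package main

import "fmt"

type greeter interface{ greet() string }

type impl struct {
	zone string
	kind string // defaulted by the constructor, like loadBalancerType above
}

func (i *impl) greet() string { return i.zone + "/" + i.kind }

// newGreeter fills defaults, so adding fields never breaks callers.
func newGreeter(zone string) greeter { return &impl{zone: zone, kind: "default"} }

func main() {
	g := newGreeter("us-west").(*impl) // assert to reach unexported fields in tests
	g.kind = "override"
	fmt.Println(g.greet())
}
```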
defer func() {
+		_ = lb.EnsureLoadBalancerDeleted(context.TODO(), "linodelb", svc)
+	}()
+
+	lbStatus, err := lb.EnsureLoadBalancer(context.TODO(), "linodelb", svc, nodes1)
+	if err != nil {
+		t.Errorf("EnsureLoadBalancer returned an error: %s", err)
+	}
+	svc.Status.LoadBalancer = *lbStatus
+
+	stubService(fakeClientset, svc)
+
+	f.ResetRequests()
+
+	err = lb.UpdateLoadBalancer(context.TODO(), "linodelb", svc, nodes1)
+	if err != nil {
+		t.Errorf("UpdateLoadBalancer returned an error while updating the LB to have one node: %s", err)
+	}
+
+	rx := regexp.MustCompile("/nodebalancers/[0-9]+/configs/[0-9]+/rebuild")
+	checkIDs := func() (int, int) {
+		var req *fakeRequest
+		for request := range f.requests {
+			if rx.MatchString(request.Path) {
+				req = &request
+				break
+			}
+		}
+
+		if req == nil {
+			t.Fatalf("Nodebalancer config rebuild request was not called.")
+		}
+
+		var nbcro linodego.NodeBalancerConfigRebuildOptions
+
+		if err := json.Unmarshal([]byte(req.Body), &nbcro); err != nil {
+			t.Fatalf("Unable to unmarshal request body %#v, error: %#v", req.Body, err)
+		}
+
+		withIds := 0
+		for i := range nbcro.Nodes {
+			if nbcro.Nodes[i].ID > 0 {
+				withIds++
+			}
+		}
+
+		return len(nbcro.Nodes), withIds
+	}
+
+	nodecount, nodeswithIdcount := checkIDs()
+	if nodecount != 1 {
+		t.Fatalf("Unexpected node count (%d) in request on updating the nodebalancer with one node.", nodecount)
+	}
+	if nodeswithIdcount != 1 {
+		t.Fatalf("Expected the node ID to be set when updating the nodebalancer with the same node it had previously.")
+	}
+
+	f.ResetRequests()
+	err = lb.UpdateLoadBalancer(context.TODO(), "linodelb", svc, nodes2)
+	if err != nil {
+		t.Errorf("UpdateLoadBalancer returned an error while updating the LB to have three nodes: %s", err)
+	}
+	nodecount, nodeswithIdcount = checkIDs()
+	if nodecount != 3 {
+		t.Fatalf("Unexpected node count (%d) in request on updating the nodebalancer with three nodes.", nodecount)
+	}
+	if nodeswithIdcount != 1 {
+		t.Fatalf("Expected the ID to be set on just the one node that existed before updating the LB with three nodes; it is set on %d nodes", nodeswithIdcount)
+	}
+
+	f.ResetRequests()
+	err = lb.UpdateLoadBalancer(context.TODO(), "linodelb", svc, nodes2)
+	if err != nil {
+		t.Errorf("UpdateLoadBalancer returned an error while updating the LB to have three nodes a second time: %s", err)
+	}
+	nodecount, nodeswithIdcount = checkIDs()
+	if nodecount != 3 {
+		t.Fatalf("Unexpected node count (%d) in request on updating the nodebalancer with three nodes a second time.", nodecount)
+	}
+	if nodeswithIdcount != 3 {
+		t.Fatalf("Expected IDs to be set on all three nodes when updating the NB with the three pre-existing nodes; instead they are set on %d nodes", nodeswithIdcount)
+	}
+
+	nb, err := lb.getNodeBalancerByStatus(context.TODO(), svc)
+	if err != nil {
+		t.Fatalf("failed to get NodeBalancer via status: %s", err)
+	}
+
+	cfgs, errConfigs := client.ListNodeBalancerConfigs(context.TODO(), nb.ID, nil)
+	if errConfigs != nil {
+		t.Fatalf("error getting NodeBalancer configs: %v", errConfigs)
+	}
+
+	expectedPorts := map[int]struct{}{
+		80: {},
+	}
+
+	observedPorts := make(map[int]struct{})
+
+	for _, cfg := range cfgs {
+		nbnodes, errNodes := client.ListNodeBalancerNodes(context.TODO(), nb.ID, cfg.ID, nil)
+		if errNodes != nil {
+			t.Errorf("error getting NodeBalancer nodes: %v", errNodes)
+		}
+
+		for _, node := range nbnodes {
+			t.Logf("Node %#v", node)
+		}
+
+		if len(nbnodes) != len(nodes2) {
+			t.Errorf("Expected %d nodes for port %d, got %d (%#v)", len(nodes2), cfg.Port,
len(nbnodes), nbnodes) + } + + observedPorts[cfg.Port] = struct{}{} + } + + if !reflect.DeepEqual(expectedPorts, observedPorts) { + t.Errorf("NodeBalancer ports mismatch: expected %#v, got %#v", expectedPorts, observedPorts) + } +} + func testUpdateLoadBalancerAddAnnotation(t *testing.T, client *linodego.Client, _ *fakeAPI) { svc := &v1.Service{ ObjectMeta: metav1.ObjectMeta{ Name: randString(), UID: "foobar123", Annotations: map[string]string{ - annLinodeThrottle: "15", + annotations.AnnLinodeThrottle: "15", }, }, Spec: v1.ServiceSpec{ @@ -470,7 +698,7 @@ func testUpdateLoadBalancerAddAnnotation(t *testing.T, client *linodego.Client, }, } - lb := &loadbalancers{client, "us-west", nil} + lb := newLoadbalancers(client, "us-west").(*loadbalancers) fakeClientset := fake.NewSimpleClientset() lb.kubeClient = fakeClientset @@ -486,7 +714,7 @@ func testUpdateLoadBalancerAddAnnotation(t *testing.T, client *linodego.Client, stubService(fakeClientset, svc) svc.ObjectMeta.SetAnnotations(map[string]string{ - annLinodeThrottle: "10", + annotations.AnnLinodeThrottle: "10", }) err = lb.UpdateLoadBalancer(context.TODO(), "linodelb", svc, nodes) @@ -508,7 +736,7 @@ func testUpdateLoadBalancerAddAnnotation(t *testing.T, client *linodego.Client, func testUpdateLoadBalancerAddPortAnnotation(t *testing.T, client *linodego.Client, _ *fakeAPI) { targetTestPort := 80 - portConfigAnnotation := fmt.Sprintf("%s%d", annLinodePortConfigPrefix, targetTestPort) + portConfigAnnotation := fmt.Sprintf("%s%d", annotations.AnnLinodePortConfigPrefix, targetTestPort) svc := &v1.Service{ ObjectMeta: metav1.ObjectMeta{ Name: randString(), @@ -540,7 +768,7 @@ func testUpdateLoadBalancerAddPortAnnotation(t *testing.T, client *linodego.Clie }, } - lb := &loadbalancers{client, "us-west", nil} + lb := newLoadbalancers(client, "us-west").(*loadbalancers) fakeClientset := fake.NewSimpleClientset() lb.kubeClient = fakeClientset @@ -620,7 +848,7 @@ func testUpdateLoadBalancerAddTags(t *testing.T, client *linodego.Client, _ *fak }, } - lb := &loadbalancers{client, "us-west", nil} + lb := newLoadbalancers(client, "us-west").(*loadbalancers) fakeClientset := fake.NewSimpleClientset() lb.kubeClient = fakeClientset clusterName := "linodelb" @@ -638,7 +866,7 @@ func testUpdateLoadBalancerAddTags(t *testing.T, client *linodego.Client, _ *fak testTags := "test,new,tags" svc.ObjectMeta.SetAnnotations(map[string]string{ - annLinodeLoadBalancerTags: testTags, + annotations.AnnLinodeLoadBalancerTags: testTags, }) err = lb.UpdateLoadBalancer(context.TODO(), clusterName, svc, nodes) @@ -665,7 +893,7 @@ func testUpdateLoadBalancerAddTLSPort(t *testing.T, client *linodego.Client, _ * Name: randString(), UID: "foobar123", Annotations: map[string]string{ - annLinodeThrottle: "15", + annotations.AnnLinodeThrottle: "15", }, }, Spec: v1.ServiceSpec{ @@ -700,7 +928,7 @@ func testUpdateLoadBalancerAddTLSPort(t *testing.T, client *linodego.Client, _ * NodePort: int32(30001), } - lb := &loadbalancers{client, "us-west", nil} + lb := newLoadbalancers(client, "us-west").(*loadbalancers) defer func() { _ = lb.EnsureLoadBalancerDeleted(context.TODO(), "linodelb", svc) @@ -719,7 +947,7 @@ func testUpdateLoadBalancerAddTLSPort(t *testing.T, client *linodego.Client, _ * stubService(fakeClientset, svc) svc.Spec.Ports = append(svc.Spec.Ports, extraPort) svc.ObjectMeta.SetAnnotations(map[string]string{ - annLinodePortConfigPrefix + "443": `{ "protocol": "https", "tls-secret-name": "tls-secret"}`, + annotations.AnnLinodePortConfigPrefix + "443": `{ "protocol": "https", 
"tls-secret-name": "tls-secret"}`, }) err = lb.UpdateLoadBalancer(context.TODO(), "linodelb", svc, nodes) if err != nil { @@ -775,7 +1003,7 @@ func testUpdateLoadBalancerAddProxyProtocol(t *testing.T, client *linodego.Clien }, } - lb := &loadbalancers{client, "us-west", nil} + lb := newLoadbalancers(client, "us-west").(*loadbalancers) fakeClientset := fake.NewSimpleClientset() lb.kubeClient = fakeClientset @@ -833,7 +1061,7 @@ func testUpdateLoadBalancerAddProxyProtocol(t *testing.T, client *linodego.Clien svc.Status.LoadBalancer = *makeLoadBalancerStatus(svc, nodeBalancer) svc.ObjectMeta.SetAnnotations(map[string]string{ - annLinodeDefaultProxyProtocol: string(tc.proxyProtocolConfig), + annotations.AnnLinodeDefaultProxyProtocol: string(tc.proxyProtocolConfig), }) stubService(fakeClientset, svc) @@ -870,7 +1098,7 @@ func testUpdateLoadBalancerAddNewFirewall(t *testing.T, client *linodego.Client, Name: randString(), UID: "foobar123", Annotations: map[string]string{ - annLinodeThrottle: "15", + annotations.AnnLinodeThrottle: "15", }, }, Spec: v1.ServiceSpec{ @@ -898,7 +1126,7 @@ func testUpdateLoadBalancerAddNewFirewall(t *testing.T, client *linodego.Client, }, } - lb := &loadbalancers{client, "us-west", nil} + lb := newLoadbalancers(client, "us-west").(*loadbalancers) fakeClientset := fake.NewSimpleClientset() lb.kubeClient = fakeClientset @@ -912,7 +1140,8 @@ func testUpdateLoadBalancerAddNewFirewall(t *testing.T, client *linodego.Client, } svc.Status.LoadBalancer = *lbStatus stubService(fakeClientset, svc) - firewall, err := lb.createFirewall(context.TODO(), linodego.FirewallCreateOptions{ + fwClient := firewall.LinodeClient{Client: client} + fw, err := fwClient.CreateFirewall(context.TODO(), linodego.FirewallCreateOptions{ Label: "test", Rules: linodego.FirewallRuleSet{Inbound: []linodego.FirewallRule{{ Action: "ACCEPT", @@ -929,11 +1158,11 @@ func testUpdateLoadBalancerAddNewFirewall(t *testing.T, client *linodego.Client, t.Errorf("CreatingFirewall returned an error: %s", err) } defer func() { - _ = lb.deleteFirewall(context.TODO(), firewall) + _ = fwClient.DeleteFirewall(context.TODO(), fw) }() svc.ObjectMeta.SetAnnotations(map[string]string{ - annLinodeCloudFirewallID: strconv.Itoa(firewall.ID), + annotations.AnnLinodeCloudFirewallID: strconv.Itoa(fw.ID), }) err = lb.UpdateLoadBalancer(context.TODO(), "linodelb", svc, nodes) @@ -955,11 +1184,12 @@ func testUpdateLoadBalancerAddNewFirewall(t *testing.T, client *linodego.Client, t.Fatalf("No attached firewalls found") } - if firewalls[0].ID != firewall.ID { + if firewalls[0].ID != fw.ID { t.Fatalf("Attached firewallID not matching with created firewall") } } +// This will also test the firewall with >255 IPs func testUpdateLoadBalancerAddNewFirewallACL(t *testing.T, client *linodego.Client, fakeAPI *fakeAPI) { svc := &v1.Service{ ObjectMeta: metav1.ObjectMeta{ @@ -991,7 +1221,7 @@ func testUpdateLoadBalancerAddNewFirewallACL(t *testing.T, client *linodego.Clie }, } - lb := &loadbalancers{client, "us-west", nil} + lb := newLoadbalancers(client, "us-west").(*loadbalancers) fakeClientset := fake.NewSimpleClientset() lb.kubeClient = fakeClientset @@ -1019,12 +1249,43 @@ func testUpdateLoadBalancerAddNewFirewallACL(t *testing.T, client *linodego.Clie t.Fatalf("Firewalls attached when none specified") } + var ipv4s []string + var ipv6s []string + i := 0 + for i < 400 { + ipv4s = append(ipv4s, fmt.Sprintf("%d.%d.%d.%d", 192, rand.Int31n(255), rand.Int31n(255), rand.Int31n(255))) + i += 1 + } + i = 0 + for i < 300 { + ip := make([]byte, 16) + 
if _, err := cryptoRand.Read(ip); err != nil { + t.Fatalf("unable to read random bytes") + } + ipv6s = append(ipv6s, fmt.Sprintf("%s:%s:%s:%s:%s:%s:%s:%s", + hex.EncodeToString(ip[0:2]), + hex.EncodeToString(ip[2:4]), + hex.EncodeToString(ip[4:6]), + hex.EncodeToString(ip[6:8]), + hex.EncodeToString(ip[8:10]), + hex.EncodeToString(ip[10:12]), + hex.EncodeToString(ip[12:14]), + hex.EncodeToString(ip[14:16]))) + i += 1 + } + acl := map[string]map[string][]string{ + "allowList": { + "ipv4": ipv4s, + "ipv6": ipv6s, + }, + } + aclString, err := json.Marshal(acl) + if err != nil { + t.Fatalf("unable to marshal json acl") + } + svc.ObjectMeta.SetAnnotations(map[string]string{ - annLinodeCloudFirewallACL: `{ - "allowList": { - "ipv4": ["2.2.2.2"] - } - }`, + annotations.AnnLinodeCloudFirewallACL: string(aclString), }) err = lb.UpdateLoadBalancer(context.TODO(), "linodelb", svc, nodes) @@ -1050,9 +1311,101 @@ func testUpdateLoadBalancerAddNewFirewallACL(t *testing.T, client *linodego.Clie t.Errorf("expected DROP inbound policy, got %s", firewallsNew[0].Rules.InboundPolicy) } - fwIPs := firewallsNew[0].Rules.Inbound[0].Addresses.IPv4 + if len(firewallsNew[0].Rules.Inbound) != 4 { + t.Errorf("expected 4 rules, got %d", len(firewallsNew[0].Rules.Inbound)) + } +} + +func testUpdateLoadBalancerDeleteFirewallRemoveACL(t *testing.T, client *linodego.Client, fakeAPI *fakeAPI) { + svc := &v1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: randString(), + UID: "foobar123", + }, + Spec: v1.ServiceSpec{ + Ports: []v1.ServicePort{ + { + Name: randString(), + Protocol: "TCP", + Port: int32(80), + NodePort: int32(30000), + }, + }, + }, + } + + nodes := []*v1.Node{ + { + Status: v1.NodeStatus{ + Addresses: []v1.NodeAddress{ + { + Type: v1.NodeInternalIP, + Address: "127.0.0.1", + }, + }, + }, + }, + } + + lb := newLoadbalancers(client, "us-west").(*loadbalancers) + fakeClientset := fake.NewSimpleClientset() + lb.kubeClient = fakeClientset + + svc.ObjectMeta.SetAnnotations(map[string]string{ + annotations.AnnLinodeCloudFirewallACL: `{ + "allowList": { + "ipv4": ["2.2.2.2"] + } + }`, + }) + + defer func() { + _ = lb.EnsureLoadBalancerDeleted(context.TODO(), "linodelb", svc) + }() + lbStatus, err := lb.EnsureLoadBalancer(context.TODO(), "linodelb", svc, nodes) + if err != nil { + t.Errorf("EnsureLoadBalancer returned an error: %s", err) + } + svc.Status.LoadBalancer = *lbStatus + stubService(fakeClientset, svc) + + nb, err := lb.getNodeBalancerByStatus(context.TODO(), svc) + if err != nil { + t.Fatalf("failed to get NodeBalancer via status: %s", err) + } + + firewalls, err := lb.client.ListNodeBalancerFirewalls(context.TODO(), nb.ID, &linodego.ListOptions{}) + if err != nil { + t.Fatalf("Failed to list nodeBalancer firewalls %s", err) + } + + if len(firewalls) == 0 { + t.Fatalf("No firewalls attached") + } + + if firewalls[0].Rules.InboundPolicy != "DROP" { + t.Errorf("expected DROP inbound policy, got %s", firewalls[0].Rules.InboundPolicy) + } + + fwIPs := firewalls[0].Rules.Inbound[0].Addresses.IPv4 if fwIPs == nil { - t.Errorf("expected 2.2.2.2, got %v", fwIPs) + t.Errorf("expected IP, got %v", fwIPs) + } + + svc.ObjectMeta.SetAnnotations(map[string]string{}) + + err = lb.UpdateLoadBalancer(context.TODO(), "linodelb", svc, nodes) + if err != nil { + t.Errorf("UpdateLoadBalancer returned an error: %s", err) + } + + firewallsNew, err := lb.client.ListNodeBalancerFirewalls(context.TODO(), nb.ID, &linodego.ListOptions{}) + if err != nil { + t.Fatalf("failed to List Firewalls %s", err) + } + + if len(firewallsNew) != 
0 {
+		t.Fatalf("firewall %d still attached", firewallsNew[0].ID)
 	}
 }
 
@@ -1087,12 +1440,12 @@ func testUpdateLoadBalancerUpdateFirewallRemoveACLaddID(t *testing.T, client *li
 		},
 	}
 
-	lb := &loadbalancers{client, "us-west", nil}
+	lb := newLoadbalancers(client, "us-west").(*loadbalancers)
 	fakeClientset := fake.NewSimpleClientset()
 	lb.kubeClient = fakeClientset
 
 	svc.ObjectMeta.SetAnnotations(map[string]string{
-		annLinodeCloudFirewallACL: `{
+		annotations.AnnLinodeCloudFirewallACL: `{
 			"allowList": {
 				"ipv4": ["2.2.2.2"]
 			}
@@ -1132,7 +1485,8 @@ func testUpdateLoadBalancerUpdateFirewallRemoveACLaddID(t *testing.T, client *li
 		t.Errorf("expected IP, got %v", fwIPs)
 	}
 
-	firewall, err := lb.createFirewall(context.TODO(), linodego.FirewallCreateOptions{
+	fwClient := firewall.LinodeClient{Client: client}
+	fw, err := fwClient.CreateFirewall(context.TODO(), linodego.FirewallCreateOptions{
 		Label: "test",
 		Rules: linodego.FirewallRuleSet{Inbound: []linodego.FirewallRule{{
 			Action: "ACCEPT",
@@ -1149,11 +1503,11 @@ func testUpdateLoadBalancerUpdateFirewallRemoveACLaddID(t *testing.T, client *li
 		t.Errorf("Error creating firewall %s", err)
 	}
 	defer func() {
-		_ = lb.deleteFirewall(context.TODO(), firewall)
+		_ = fwClient.DeleteFirewall(context.TODO(), fw)
 	}()
 
 	svc.ObjectMeta.SetAnnotations(map[string]string{
-		annLinodeCloudFirewallID: strconv.Itoa(firewall.ID),
+		annotations.AnnLinodeCloudFirewallID: strconv.Itoa(fw.ID),
 	})
 
 	err = lb.UpdateLoadBalancer(context.TODO(), "linodelb", svc, nodes)
@@ -1184,7 +1538,7 @@ func testUpdateLoadBalancerUpdateFirewallRemoveACLaddID(t *testing.T, client *li
 		t.Errorf("expected 2.2.2.2, got %v", fwIPs)
 	}
 
-	if firewallsNew[0].ID != firewall.ID {
+	if firewallsNew[0].ID != fw.ID {
 		t.Errorf("Firewall ID does not match what we created, something wrong.")
 	}
 }
@@ -1220,11 +1574,12 @@ func testUpdateLoadBalancerUpdateFirewallRemoveIDaddACL(t *testing.T, client *li
 		},
 	}
 
-	lb := &loadbalancers{client, "us-west", nil}
+	lb := newLoadbalancers(client, "us-west").(*loadbalancers)
 	fakeClientset := fake.NewSimpleClientset()
 	lb.kubeClient = fakeClientset
 
-	firewall, err := lb.createFirewall(context.TODO(), linodego.FirewallCreateOptions{
+	fwClient := firewall.LinodeClient{Client: client}
+	fw, err := fwClient.CreateFirewall(context.TODO(), linodego.FirewallCreateOptions{
 		Label: "test",
 		Rules: linodego.FirewallRuleSet{Inbound: []linodego.FirewallRule{{
 			Action: "ACCEPT",
@@ -1241,11 +1596,11 @@ func testUpdateLoadBalancerUpdateFirewallRemoveIDaddACL(t *testing.T, client *li
 		t.Errorf("Error creating firewall %s", err)
 	}
 	defer func() {
-		_ = lb.deleteFirewall(context.TODO(), firewall)
+		_ = fwClient.DeleteFirewall(context.TODO(), fw)
 	}()
 
 	svc.ObjectMeta.SetAnnotations(map[string]string{
-		annLinodeCloudFirewallID: strconv.Itoa(firewall.ID),
+		annotations.AnnLinodeCloudFirewallID: strconv.Itoa(fw.ID),
 	})
 
 	defer func() {
@@ -1281,7 +1636,7 @@ func testUpdateLoadBalancerUpdateFirewallRemoveIDaddACL(t *testing.T, client *li
 		t.Errorf("expected IP, got %v", fwIPs)
 	}
 	svc.ObjectMeta.SetAnnotations(map[string]string{
-		annLinodeCloudFirewallACL: `{
+		annotations.AnnLinodeCloudFirewallACL: `{
 			"allowList": {
 				"ipv4": ["2.2.2.2"]
 			}
@@ -1316,7 +1671,7 @@ func testUpdateLoadBalancerUpdateFirewallRemoveIDaddACL(t *testing.T, client *li
 		t.Errorf("expected 2.2.2.2, got %v", fwIPs)
 	}
 
-	if firewallsNew[0].ID != firewall.ID {
+	if firewallsNew[0].ID != fw.ID {
 		t.Errorf("Firewall ID does not match, something wrong.")
 	}
 }
@@ -1327,9 +1682,9 @@ func testUpdateLoadBalancerUpdateFirewallACL(t *testing.T, client 
*linodego.Clie Name: randString(), UID: "foobar123", Annotations: map[string]string{ - annLinodeCloudFirewallACL: `{ + annotations.AnnLinodeCloudFirewallACL: `{ "allowList": { - "ipv4": ["2.2.2.2"] + "ipv4": ["2.2.2.2/32", "3.3.3.3/32"] } }`, }, @@ -1359,7 +1714,7 @@ func testUpdateLoadBalancerUpdateFirewallACL(t *testing.T, client *linodego.Clie }, } - lb := &loadbalancers{client, "us-west", nil} + lb := newLoadbalancers(client, "us-west").(*loadbalancers) fakeClientset := fake.NewSimpleClientset() lb.kubeClient = fakeClientset @@ -1393,16 +1748,17 @@ func testUpdateLoadBalancerUpdateFirewallACL(t *testing.T, client *linodego.Clie fwIPs := firewalls[0].Rules.Inbound[0].Addresses.IPv4 if fwIPs == nil { - t.Errorf("expected 2.2.2.2, got %v", fwIPs) + t.Errorf("expected ips, got %v", fwIPs) } fmt.Printf("got %v", fwIPs) + // Add ipv6 ips in allowList svc.ObjectMeta.SetAnnotations(map[string]string{ - annLinodeCloudFirewallACL: `{ + annotations.AnnLinodeCloudFirewallACL: `{ "allowList": { - "ipv4": ["2.2.2.2"], - "ipv6": ["dead:beef::/128"] + "ipv4": ["2.2.2.2/32", "3.3.3.3/32"], + "ipv6": ["dead:beef::/128", "dead:bee::/128"] } }`, }) @@ -1431,6 +1787,98 @@ func testUpdateLoadBalancerUpdateFirewallACL(t *testing.T, client *linodego.Clie t.Errorf("expected non nil IPv4, got %v", fwIPs) } + if len(*fwIPs) != 2 { + t.Errorf("expected two IPv4 ips, got %v", fwIPs) + } + + if firewallsNew[0].Rules.Inbound[0].Addresses.IPv6 == nil { + t.Errorf("expected non nil IPv6, got %v", firewallsNew[0].Rules.Inbound[0].Addresses.IPv6) + } + + if len(*firewallsNew[0].Rules.Inbound[0].Addresses.IPv6) != 2 { + t.Errorf("expected two IPv6 ips, got %v", firewallsNew[0].Rules.Inbound[0].Addresses.IPv6) + } + + // Update ips in allowList + svc.ObjectMeta.SetAnnotations(map[string]string{ + annotations.AnnLinodeCloudFirewallACL: `{ + "allowList": { + "ipv4": ["2.2.2.1/32", "3.3.3.3/32"], + "ipv6": ["dead::/128", "dead:bee::/128"] + } + }`, + }) + + err = lb.UpdateLoadBalancer(context.TODO(), "linodelb", svc, nodes) + if err != nil { + t.Errorf("UpdateLoadBalancer returned an error: %s", err) + } + + nbUpdated, err = lb.getNodeBalancerByStatus(context.TODO(), svc) + if err != nil { + t.Fatalf("failed to get NodeBalancer via status: %s", err) + } + + firewallsNew, err = lb.client.ListNodeBalancerFirewalls(context.TODO(), nbUpdated.ID, &linodego.ListOptions{}) + if err != nil { + t.Fatalf("failed to List Firewalls %s", err) + } + + if len(firewallsNew) == 0 { + t.Fatalf("No attached firewalls found") + } + + fwIPs = firewallsNew[0].Rules.Inbound[0].Addresses.IPv4 + if fwIPs == nil { + t.Errorf("expected non nil IPv4, got %v", fwIPs) + } + + if len(*fwIPs) != 2 { + t.Errorf("expected two IPv4 ips, got %v", fwIPs) + } + + if firewallsNew[0].Rules.Inbound[0].Addresses.IPv6 == nil { + t.Errorf("expected non nil IPv6, got %v", firewallsNew[0].Rules.Inbound[0].Addresses.IPv6) + } + + if len(*firewallsNew[0].Rules.Inbound[0].Addresses.IPv6) != 2 { + t.Errorf("expected two IPv6 ips, got %v", firewallsNew[0].Rules.Inbound[0].Addresses.IPv6) + } + + // remove one ipv4 and one ipv6 ip from allowList + svc.ObjectMeta.SetAnnotations(map[string]string{ + annotations.AnnLinodeCloudFirewallACL: `{ + "allowList": { + "ipv4": ["3.3.3.3/32"], + "ipv6": ["dead:beef::/128"] + } + }`, + }) + + err = lb.UpdateLoadBalancer(context.TODO(), "linodelb", svc, nodes) + if err != nil { + t.Errorf("UpdateLoadBalancer returned an error: %s", err) + } + + nbUpdated, err = lb.getNodeBalancerByStatus(context.TODO(), svc) + if err != nil { + 
t.Fatalf("failed to get NodeBalancer via status: %s", err) + } + + firewallsNew, err = lb.client.ListNodeBalancerFirewalls(context.TODO(), nbUpdated.ID, &linodego.ListOptions{}) + if err != nil { + t.Fatalf("failed to List Firewalls %s", err) + } + + if len(firewallsNew) == 0 { + t.Fatalf("No attached firewalls found") + } + + fwIPs = firewallsNew[0].Rules.Inbound[0].Addresses.IPv4 + if fwIPs == nil { + t.Errorf("expected non nil IPv4, got %v", fwIPs) + } + if len(*fwIPs) != 1 { t.Errorf("expected one IPv4, got %v", fwIPs) } @@ -1442,6 +1890,12 @@ func testUpdateLoadBalancerUpdateFirewallACL(t *testing.T, client *linodego.Clie if len(*firewallsNew[0].Rules.Inbound[0].Addresses.IPv6) != 1 { t.Errorf("expected one IPv6, got %v", firewallsNew[0].Rules.Inbound[0].Addresses.IPv6) } + + // Run update with same ACL + err = lb.UpdateLoadBalancer(context.TODO(), "linodelb", svc, nodes) + if err != nil { + t.Errorf("UpdateLoadBalancer returned an error: %s", err) + } } func testUpdateLoadBalancerUpdateFirewall(t *testing.T, client *linodego.Client, fakeAPI *fakeAPI) { @@ -1464,7 +1918,7 @@ func testUpdateLoadBalancerUpdateFirewall(t *testing.T, client *linodego.Client, Name: randString(), UID: "foobar123", Annotations: map[string]string{ - annLinodeThrottle: "15", + annotations.AnnLinodeThrottle: "15", }, }, Spec: v1.ServiceSpec{ @@ -1492,7 +1946,7 @@ func testUpdateLoadBalancerUpdateFirewall(t *testing.T, client *linodego.Client, }, } - lb := &loadbalancers{client, "us-west", nil} + lb := newLoadbalancers(client, "us-west").(*loadbalancers) fakeClientset := fake.NewSimpleClientset() lb.kubeClient = fakeClientset @@ -1500,16 +1954,17 @@ func testUpdateLoadBalancerUpdateFirewall(t *testing.T, client *linodego.Client, _ = lb.EnsureLoadBalancerDeleted(context.TODO(), "linodelb", svc) }() - firewall, err := lb.createFirewall(context.TODO(), firewallCreateOpts) + fwClient := firewall.LinodeClient{Client: client} + fw, err := fwClient.CreateFirewall(context.TODO(), firewallCreateOpts) if err != nil { t.Errorf("Error creating firewall %s", err) } defer func() { - _ = lb.deleteFirewall(context.TODO(), firewall) + _ = fwClient.DeleteFirewall(context.TODO(), fw) }() svc.ObjectMeta.SetAnnotations(map[string]string{ - annLinodeCloudFirewallID: strconv.Itoa(firewall.ID), + annotations.AnnLinodeCloudFirewallID: strconv.Itoa(fw.ID), }) lbStatus, err := lb.EnsureLoadBalancer(context.TODO(), "linodelb", svc, nodes) if err != nil { @@ -1532,21 +1987,21 @@ func testUpdateLoadBalancerUpdateFirewall(t *testing.T, client *linodego.Client, t.Fatalf("No firewalls attached") } - if firewall.ID != firewalls[0].ID { + if fw.ID != firewalls[0].ID { t.Fatalf("Attached firewallID not matching with created firewall") } firewallCreateOpts.Label = "test2" - firewallNew, err := lb.createFirewall(context.TODO(), firewallCreateOpts) + firewallNew, err := fwClient.CreateFirewall(context.TODO(), firewallCreateOpts) if err != nil { t.Fatalf("Error in creating firewall %s", err) } defer func() { - _ = lb.deleteFirewall(context.TODO(), firewallNew) + _ = fwClient.DeleteFirewall(context.TODO(), firewallNew) }() svc.ObjectMeta.SetAnnotations(map[string]string{ - annLinodeCloudFirewallID: strconv.Itoa(firewallNew.ID), + annotations.AnnLinodeCloudFirewallID: strconv.Itoa(firewallNew.ID), }) err = lb.UpdateLoadBalancer(context.TODO(), "linodelb", svc, nodes) @@ -1573,7 +2028,7 @@ func testUpdateLoadBalancerUpdateFirewall(t *testing.T, client *linodego.Client, } } -func testUpdateLoadBalancerDeleteFirewall(t *testing.T, client *linodego.Client, 
fakeAPI *fakeAPI) { +func testUpdateLoadBalancerDeleteFirewallRemoveID(t *testing.T, client *linodego.Client, fakeAPI *fakeAPI) { firewallCreateOpts := linodego.FirewallCreateOptions{ Label: "test", Rules: linodego.FirewallRuleSet{Inbound: []linodego.FirewallRule{{ Action: "ACCEPT", @@ -1618,7 +2073,7 @@ func testUpdateLoadBalancerDeleteFirewall(t *testing.T, client *linodego.Client, }, } - lb := &loadbalancers{client, "us-west", nil} + lb := newLoadbalancers(client, "us-west").(*loadbalancers) fakeClientset := fake.NewSimpleClientset() lb.kubeClient = fakeClientset @@ -1626,16 +2081,17 @@ func testUpdateLoadBalancerDeleteFirewall(t *testing.T, client *linodego.Client, _ = lb.EnsureLoadBalancerDeleted(context.TODO(), "linodelb", svc) }() - firewall, err := lb.createFirewall(context.TODO(), firewallCreateOpts) + fwClient := firewall.LinodeClient{Client: client} + fw, err := fwClient.CreateFirewall(context.TODO(), firewallCreateOpts) if err != nil { t.Errorf("Error in creating firewall %s", err) } defer func() { - _ = lb.deleteFirewall(context.TODO(), firewall) + _ = fwClient.DeleteFirewall(context.TODO(), fw) }() svc.ObjectMeta.SetAnnotations(map[string]string{ - annLinodeCloudFirewallID: strconv.Itoa(firewall.ID), + annotations.AnnLinodeCloudFirewallID: strconv.Itoa(fw.ID), }) lbStatus, err := lb.EnsureLoadBalancer(context.TODO(), "linodelb", svc, nodes) @@ -1659,7 +2115,7 @@ func testUpdateLoadBalancerDeleteFirewall(t *testing.T, client *linodego.Client, t.Fatalf("No firewalls attached") } - if firewall.ID != firewalls[0].ID { + if fw.ID != firewalls[0].ID { t.Fatalf("Attached firewallID not matching with created firewall") } @@ -1712,7 +2168,7 @@ func testUpdateLoadBalancerAddNodeBalancerID(t *testing.T, client *linodego.Clie }, } - lb := &loadbalancers{client, "us-west", nil} + lb := newLoadbalancers(client, "us-west").(*loadbalancers) defer func() { _ = lb.EnsureLoadBalancerDeleted(context.TODO(), "linodelb", svc) }() @@ -1738,14 +2194,15 @@ func testUpdateLoadBalancerAddNodeBalancerID(t *testing.T, client *linodego.Clie stubService(fakeClientset, svc) svc.ObjectMeta.SetAnnotations(map[string]string{ - annLinodeNodeBalancerID: strconv.Itoa(newNodeBalancer.ID), + annotations.AnnLinodeNodeBalancerID: strconv.Itoa(newNodeBalancer.ID), }) err = lb.UpdateLoadBalancer(context.TODO(), "linodelb", svc, nodes) if err != nil { t.Errorf("UpdateLoadBalancer returned an error while updating annotations: %s", err) } - lbStatus, _, err := lb.GetLoadBalancer(context.TODO(), svc.ClusterName, svc) + clusterName := strings.TrimPrefix(svc.Namespace, "kube-system-") + lbStatus, _, err := lb.GetLoadBalancer(context.TODO(), clusterName, svc) if err != nil { t.Errorf("GetLoadBalancer returned an error: %s", err) } @@ -1775,7 +2232,7 @@ func Test_getConnectionThrottle(t *testing.T) { Annotations: map[string]string{}, }, }, - 20, + 0, }, { "throttle value is a string", { ObjectMeta: metav1.ObjectMeta{ Name: randString(), UID: "abc123", Annotations: map[string]string{ - annLinodeThrottle: "foo", + annotations.AnnLinodeThrottle: "foo", }, }, }, - 20, + 0, }, { "throttle value is less than 0", { ObjectMeta: metav1.ObjectMeta{ Name: randString(), UID: "abc123", Annotations: map[string]string{ - annLinodeThrottle: "-123", + annotations.AnnLinodeThrottle: "-123", }, }, }, @@ -1810,7 +2267,7 @@ func Test_getConnectionThrottle(t *testing.T) { Name: randString(), UID: "abc123", Annotations: map[string]string{ - annLinodeThrottle: "1", +
annotations.AnnLinodeThrottle: "1", }, }, }, @@ -1823,7 +2280,7 @@ func Test_getConnectionThrottle(t *testing.T) { Name: randString(), UID: "abc123", Annotations: map[string]string{ - annLinodeThrottle: "21", + annotations.AnnLinodeThrottle: "21", }, }, }, @@ -1867,7 +2324,7 @@ func Test_getPortConfig(t *testing.T) { Name: randString(), UID: "abc123", Annotations: map[string]string{ - annLinodeDefaultProxyProtocol: string(linodego.ProxyProtocolV2), + annotations.AnnLinodeDefaultProxyProtocol: string(linodego.ProxyProtocolV2), }, }, }, @@ -1881,8 +2338,8 @@ func Test_getPortConfig(t *testing.T) { Name: randString(), UID: "abc123", Annotations: map[string]string{ - annLinodeDefaultProxyProtocol: string(linodego.ProxyProtocolV2), - annLinodePortConfigPrefix + "443": fmt.Sprintf(`{"proxy-protocol": "%s"}`, linodego.ProxyProtocolV1), + annotations.AnnLinodeDefaultProxyProtocol: string(linodego.ProxyProtocolV2), + annotations.AnnLinodePortConfigPrefix + "443": fmt.Sprintf(`{"proxy-protocol": "%s"}`, linodego.ProxyProtocolV1), }, }, }, @@ -1896,7 +2353,7 @@ func Test_getPortConfig(t *testing.T) { Name: randString(), UID: "abc123", Annotations: map[string]string{ - annLinodeDefaultProxyProtocol: "invalid", + annotations.AnnLinodeDefaultProxyProtocol: "invalid", }, }, }, @@ -1922,7 +2379,7 @@ func Test_getPortConfig(t *testing.T) { Name: randString(), UID: "abc123", Annotations: map[string]string{ - annLinodeDefaultProtocol: "tcp", + annotations.AnnLinodeDefaultProtocol: "tcp", }, }, }, @@ -1936,7 +2393,7 @@ func Test_getPortConfig(t *testing.T) { Name: randString(), UID: "abc123", Annotations: map[string]string{ - annLinodeDefaultProtocol: "HTTP", + annotations.AnnLinodeDefaultProtocol: "HTTP", }, }, }, @@ -1950,7 +2407,7 @@ func Test_getPortConfig(t *testing.T) { Name: randString(), UID: "abc123", Annotations: map[string]string{ - annLinodeDefaultProtocol: "invalid", + annotations.AnnLinodeDefaultProtocol: "invalid", }, }, }, @@ -1964,8 +2421,8 @@ func Test_getPortConfig(t *testing.T) { Name: randString(), UID: "abc123", Annotations: map[string]string{ - annLinodeDefaultProtocol: "http", - annLinodePortConfigPrefix + "443": `{}`, + annotations.AnnLinodeDefaultProtocol: "http", + annotations.AnnLinodePortConfigPrefix + "443": `{}`, }, }, }, @@ -1979,7 +2436,7 @@ func Test_getPortConfig(t *testing.T) { Name: randString(), UID: "abc123", Annotations: map[string]string{ - annLinodePortConfigPrefix + "443": `{ "protocol": "HTTp" }`, + annotations.AnnLinodePortConfigPrefix + "443": `{ "protocol": "HTTp" }`, }, }, }, @@ -1993,7 +2450,7 @@ func Test_getPortConfig(t *testing.T) { Name: randString(), UID: "abc123", Annotations: map[string]string{ - annLinodePortConfigPrefix + "443": `{ "protocol": "invalid" }`, + annotations.AnnLinodePortConfigPrefix + "443": `{ "protocol": "invalid" }`, }, }, }, @@ -2048,7 +2505,7 @@ func Test_getHealthCheckType(t *testing.T) { Name: "test", UID: "abc123", Annotations: map[string]string{ - annLinodeHealthCheckType: "http", + annotations.AnnLinodeHealthCheckType: "http", }, }, }, @@ -2062,12 +2519,12 @@ func Test_getHealthCheckType(t *testing.T) { Name: "test", UID: "abc123", Annotations: map[string]string{ - annLinodeHealthCheckType: "invalid", + annotations.AnnLinodeHealthCheckType: "invalid", }, }, }, "", - fmt.Errorf("invalid health check type: %q specified in annotation: %q", "invalid", annLinodeHealthCheckType), + fmt.Errorf("invalid health check type: %q specified in annotation: %q", "invalid", annotations.AnnLinodeHealthCheckType), }, } @@ -2128,7 +2585,7 @@ func 
Test_getNodePrivateIP(t *testing.T) { &v1.Node{ ObjectMeta: metav1.ObjectMeta{ Annotations: map[string]string{ - annLinodeNodePrivateIP: "192.168.42.42", + annotations.AnnLinodeNodePrivateIP: "192.168.42.42", }, }, Status: v1.NodeStatus{ @@ -2162,7 +2619,7 @@ func testBuildLoadBalancerRequest(t *testing.T, client *linodego.Client, _ *fake Name: "test", UID: "foobar123", Annotations: map[string]string{ - annLinodeDefaultProtocol: "tcp", + annotations.AnnLinodeDefaultProtocol: "tcp", }, }, Spec: v1.ServiceSpec{ @@ -2194,7 +2651,7 @@ func testBuildLoadBalancerRequest(t *testing.T, client *linodego.Client, _ *fake }, } - lb := &loadbalancers{client, "us-west", nil} + lb := newLoadbalancers(client, "us-west").(*loadbalancers) nb, err := lb.buildLoadBalancerRequest(context.TODO(), "linodelb", svc, nodes) if err != nil { t.Fatal(err) @@ -2241,7 +2698,7 @@ func testEnsureLoadBalancerPreserveAnnotation(t *testing.T, client *linodego.Cli }, } - lb := &loadbalancers{client, "us-west", nil} + lb := newLoadbalancers(client, "us-west").(*loadbalancers) for _, test := range []struct { name string deleted bool @@ -2249,17 +2706,17 @@ func testEnsureLoadBalancerPreserveAnnotation(t *testing.T, client *linodego.Cli }{ { name: "load balancer preserved", - annotations: map[string]string{annLinodeLoadBalancerPreserve: "true"}, + annotations: map[string]string{annotations.AnnLinodeLoadBalancerPreserve: "true"}, deleted: false, }, { name: "load balancer not preserved (deleted)", - annotations: map[string]string{annLinodeLoadBalancerPreserve: "false"}, + annotations: map[string]string{annotations.AnnLinodeLoadBalancerPreserve: "false"}, deleted: true, }, { name: "invalid value treated as false (deleted)", - annotations: map[string]string{annLinodeLoadBalancerPreserve: "bogus"}, + annotations: map[string]string{annotations.AnnLinodeLoadBalancerPreserve: "bogus"}, deleted: true, }, } { @@ -2301,7 +2758,7 @@ func testEnsureLoadBalancerDeleted(t *testing.T, client *linodego.Client, fake * Name: "test", UID: "foobar123", Annotations: map[string]string{ - annLinodeDefaultProtocol: "tcp", + annotations.AnnLinodeDefaultProtocol: "tcp", }, }, Spec: v1.ServiceSpec{ @@ -2335,7 +2792,7 @@ func testEnsureLoadBalancerDeleted(t *testing.T, client *linodego.Client, fake * Name: "notexists", UID: "notexists123", Annotations: map[string]string{ - annLinodeDefaultProtocol: "tcp", + annotations.AnnLinodeDefaultProtocol: "tcp", }, }, Spec: v1.ServiceSpec{ @@ -2353,7 +2810,7 @@ func testEnsureLoadBalancerDeleted(t *testing.T, client *linodego.Client, fake * }, } - lb := &loadbalancers{client, "us-west", nil} + lb := newLoadbalancers(client, "us-west").(*loadbalancers) configs := []*linodego.NodeBalancerConfigCreateOptions{} _, err := lb.createNodeBalancer(context.TODO(), "linodelb", svc, configs) if err != nil { @@ -2379,8 +2836,8 @@ func testEnsureExistingLoadBalancer(t *testing.T, client *linodego.Client, _ *fa Name: "testensure", UID: "foobar123", Annotations: map[string]string{ - annLinodeDefaultProtocol: "tcp", - annLinodePortConfigPrefix + "8443": `{ "protocol": "https", "tls-secret-name": "tls-secret"}`, + annotations.AnnLinodeDefaultProtocol: "tcp", + annotations.AnnLinodePortConfigPrefix + "8443": `{ "protocol": "https", "tls-secret-name": "tls-secret"}`, }, }, Spec: v1.ServiceSpec{ @@ -2401,7 +2858,7 @@ func testEnsureExistingLoadBalancer(t *testing.T, client *linodego.Client, _ *fa }, } - lb := &loadbalancers{client, "us-west", nil} + lb := newLoadbalancers(client, "us-west").(*loadbalancers) lb.kubeClient = 
fake.NewSimpleClientset() addTLSSecret(t, lb.kubeClient) @@ -2525,11 +2982,11 @@ func testMakeLoadBalancerStatus(t *testing.T, client *linodego.Client, _ *fakeAP t.Errorf("expected status for basic service to be %#v; got %#v", expectedStatus, status) } - svc.Annotations[annLinodeHostnameOnlyIngress] = "true" + svc.Annotations[annotations.AnnLinodeHostnameOnlyIngress] = "true" expectedStatus.Ingress[0] = v1.LoadBalancerIngress{Hostname: hostname} status = makeLoadBalancerStatus(svc, nb) if !reflect.DeepEqual(status, expectedStatus) { - t.Errorf("expected status for %q annotated service to be %#v; got %#v", annLinodeHostnameOnlyIngress, expectedStatus, status) + t.Errorf("expected status for %q annotated service to be %#v; got %#v", annotations.AnnLinodeHostnameOnlyIngress, expectedStatus, status) } } @@ -2563,21 +3020,21 @@ func testMakeLoadBalancerStatusEnvVar(t *testing.T, client *linodego.Client, _ * expectedStatus.Ingress[0] = v1.LoadBalancerIngress{Hostname: hostname} status = makeLoadBalancerStatus(svc, nb) if !reflect.DeepEqual(status, expectedStatus) { - t.Errorf("expected status for %q annotated service to be %#v; got %#v", annLinodeHostnameOnlyIngress, expectedStatus, status) + t.Errorf("expected status for %q annotated service to be %#v; got %#v", annotations.AnnLinodeHostnameOnlyIngress, expectedStatus, status) } t.Setenv("LINODE_HOSTNAME_ONLY_INGRESS", "false") expectedStatus.Ingress[0] = v1.LoadBalancerIngress{Hostname: hostname} status = makeLoadBalancerStatus(svc, nb) if reflect.DeepEqual(status, expectedStatus) { - t.Errorf("expected status for %q annotated service to be %#v; got %#v", annLinodeHostnameOnlyIngress, expectedStatus, status) + t.Errorf("expected status for %q annotated service to be %#v; got %#v", annotations.AnnLinodeHostnameOnlyIngress, expectedStatus, status) } t.Setenv("LINODE_HOSTNAME_ONLY_INGRESS", "banana") expectedStatus.Ingress[0] = v1.LoadBalancerIngress{Hostname: hostname} status = makeLoadBalancerStatus(svc, nb) if reflect.DeepEqual(status, expectedStatus) { - t.Errorf("expected status for %q annotated service to be %#v; got %#v", annLinodeHostnameOnlyIngress, expectedStatus, status) + t.Errorf("expected status for %q annotated service to be %#v; got %#v", annotations.AnnLinodeHostnameOnlyIngress, expectedStatus, status) } os.Unsetenv("LINODE_HOSTNAME_ONLY_INGRESS") } @@ -2597,12 +3054,12 @@ func testCleanupDoesntCall(t *testing.T, client *linodego.Client, fakeAPI *fakeA svcAnn := &v1.Service{ ObjectMeta: metav1.ObjectMeta{ Name: "test", - Annotations: map[string]string{annLinodeNodeBalancerID: strconv.Itoa(nb2.ID)}, + Annotations: map[string]string{annotations.AnnLinodeNodeBalancerID: strconv.Itoa(nb2.ID)}, }, } svc.Status.LoadBalancer = *makeLoadBalancerStatus(svc, nb1) svcAnn.Status.LoadBalancer = *makeLoadBalancerStatus(svcAnn, nb1) - lb := &loadbalancers{client, region, nil} + lb := newLoadbalancers(client, region).(*loadbalancers) fakeAPI.ResetRequests() t.Run("non-annotated service shouldn't call the API during cleanup", func(t *testing.T) { @@ -2649,7 +3106,7 @@ func testUpdateLoadBalancerNoNodes(t *testing.T, client *linodego.Client, _ *fak }, } - lb := &loadbalancers{client, "us-west", nil} + lb := newLoadbalancers(client, "us-west").(*loadbalancers) defer func() { _ = lb.EnsureLoadBalancerDeleted(context.TODO(), "linodelb", svc) }() @@ -2666,7 +3123,7 @@ func testUpdateLoadBalancerNoNodes(t *testing.T, client *linodego.Client, _ *fak svc.Status.LoadBalancer = *makeLoadBalancerStatus(svc, nodeBalancer) stubService(fakeClientset, svc) 
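// Pin the service to the NodeBalancer created above so the update path reuses it instead of provisioning a new one.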
svc.ObjectMeta.SetAnnotations(map[string]string{ - annLinodeNodeBalancerID: strconv.Itoa(nodeBalancer.ID), + annotations.AnnLinodeNodeBalancerID: strconv.Itoa(nodeBalancer.ID), }) // setup done, test ensure/update @@ -2682,7 +3139,7 @@ func testUpdateLoadBalancerNoNodes(t *testing.T, client *linodego.Client, _ *fak } func testGetNodeBalancerForServiceIDDoesNotExist(t *testing.T, client *linodego.Client, _ *fakeAPI) { - lb := &loadbalancers{client, "us-west", nil} + lb := newLoadbalancers(client, "us-west").(*loadbalancers) bogusNodeBalancerID := "123456" svc := &v1.Service{ @@ -2690,7 +3147,7 @@ func testGetNodeBalancerForServiceIDDoesNotExist(t *testing.T, client *linodego. Name: "test", UID: "foobar123", Annotations: map[string]string{ - annLinodeNodeBalancerID: bogusNodeBalancerID, + annotations.AnnLinodeNodeBalancerID: bogusNodeBalancerID, }, }, Spec: v1.ServiceSpec{ @@ -2721,7 +3178,7 @@ func testGetNodeBalancerForServiceIDDoesNotExist(t *testing.T, client *linodego. } func testEnsureNewLoadBalancerWithNodeBalancerID(t *testing.T, client *linodego.Client, _ *fakeAPI) { - lb := &loadbalancers{client, "us-west", nil} + lb := newLoadbalancers(client, "us-west").(*loadbalancers) nodeBalancer, err := client.CreateNodeBalancer(context.TODO(), linodego.NodeBalancerCreateOptions{ Region: lb.zone, }) @@ -2734,7 +3191,7 @@ func testEnsureNewLoadBalancerWithNodeBalancerID(t *testing.T, client *linodego. Name: "testensure", UID: "foobar123", Annotations: map[string]string{ - annLinodeNodeBalancerID: strconv.Itoa(nodeBalancer.ID), + annotations.AnnLinodeNodeBalancerID: strconv.Itoa(nodeBalancer.ID), }, }, Spec: v1.ServiceSpec{ @@ -2778,8 +3235,8 @@ func testEnsureNewLoadBalancer(t *testing.T, client *linodego.Client, _ *fakeAPI Name: "testensure", UID: "foobar123", Annotations: map[string]string{ - annLinodeDefaultProtocol: "tcp", - annLinodePortConfigPrefix + "8443": `{ "protocol": "https", "tls-secret-name": "tls-secret"}`, + annotations.AnnLinodeDefaultProtocol: "tcp", + annotations.AnnLinodePortConfigPrefix + "8443": `{ "protocol": "https", "tls-secret-name": "tls-secret"}`, }, }, Spec: v1.ServiceSpec{ @@ -2815,7 +3272,7 @@ func testEnsureNewLoadBalancer(t *testing.T, client *linodego.Client, _ *fakeAPI }, }, } - lb := &loadbalancers{client, "us-west", nil} + lb := newLoadbalancers(client, "us-west").(*loadbalancers) lb.kubeClient = fake.NewSimpleClientset() addTLSSecret(t, lb.kubeClient) @@ -2828,13 +3285,13 @@ func testEnsureNewLoadBalancer(t *testing.T, client *linodego.Client, _ *fakeAPI } func testGetLoadBalancer(t *testing.T, client *linodego.Client, _ *fakeAPI) { - lb := &loadbalancers{client, "us-west", nil} + lb := newLoadbalancers(client, "us-west").(*loadbalancers) svc := &v1.Service{ ObjectMeta: metav1.ObjectMeta{ Name: "test", UID: "foobar123", Annotations: map[string]string{ - annLinodeDefaultProtocol: "tcp", + annotations.AnnLinodeDefaultProtocol: "tcp", }, }, Spec: v1.ServiceSpec{ @@ -2881,7 +3338,7 @@ func testGetLoadBalancer(t *testing.T, client *linodego.Client, _ *fakeAPI) { Name: "notexists", UID: "notexists123", Annotations: map[string]string{ - annLinodeDefaultProtocol: "tcp", + annotations.AnnLinodeDefaultProtocol: "tcp", }, }, Spec: v1.ServiceSpec{ @@ -2943,7 +3400,7 @@ func Test_getPortConfigAnnotation(t *testing.T) { }{ { name: "Test single port annotation", - ann: map[string]string{annLinodePortConfigPrefix + "443": `{ "tls-secret-name": "prod-app-tls", "protocol": "https" }`}, + ann: map[string]string{annotations.AnnLinodePortConfigPrefix + "443": `{ 
"tls-secret-name": "prod-app-tls", "protocol": "https" }`}, expected: portConfigAnnotation{ TLSSecretName: "prod-app-tls", Protocol: "https", @@ -2953,8 +3410,8 @@ func Test_getPortConfigAnnotation(t *testing.T) { { name: "Test multiple port annotation", ann: map[string]string{ - annLinodePortConfigPrefix + "443": `{ "tls-secret-name": "prod-app-tls", "protocol": "https" }`, - annLinodePortConfigPrefix + "80": `{ "protocol": "http" }`, + annotations.AnnLinodePortConfigPrefix + "443": `{ "tls-secret-name": "prod-app-tls", "protocol": "https" }`, + annotations.AnnLinodePortConfigPrefix + "80": `{ "protocol": "http" }`, }, expected: portConfigAnnotation{ TLSSecretName: "prod-app-tls", @@ -2973,7 +3430,7 @@ func Test_getPortConfigAnnotation(t *testing.T) { { name: "Test invalid json", ann: map[string]string{ - annLinodePortConfigPrefix + "443": `{ "tls-secret-name": "prod-app-tls" `, + annotations.AnnLinodePortConfigPrefix + "443": `{ "tls-secret-name": "prod-app-tls" `, }, expected: portConfigAnnotation{}, err: "unexpected end of JSON input", diff --git a/cloud/linode/metrics.go b/cloud/linode/metrics.go new file mode 100644 index 00000000..a447dfdb --- /dev/null +++ b/cloud/linode/metrics.go @@ -0,0 +1,17 @@ +package linode + +import ( + "sync" + + "github.com/linode/linode-cloud-controller-manager/cloud/linode/client" + + "k8s.io/component-base/metrics/legacyregistry" +) + +var registerOnce sync.Once + +func registerMetrics() { + registerOnce.Do(func() { + legacyregistry.RawMustRegister(client.ClientMethodCounterVec) + }) +} diff --git a/cloud/linode/node_controller.go b/cloud/linode/node_controller.go index d8be9eda..365e4da0 100644 --- a/cloud/linode/node_controller.go +++ b/cloud/linode/node_controller.go @@ -18,12 +18,20 @@ import ( "k8s.io/client-go/util/retry" "k8s.io/client-go/util/workqueue" "k8s.io/klog/v2" + + "github.com/linode/linode-cloud-controller-manager/cloud/annotations" + "github.com/linode/linode-cloud-controller-manager/cloud/linode/client" +) + +const ( + informerResyncPeriod = 1 * time.Minute + defaultMetadataTTL = 300 * time.Second ) type nodeController struct { sync.RWMutex - client Client + client client.Client instances *instances kubeclient kubernetes.Interface informer v1informers.NodeInformer @@ -31,40 +39,54 @@ type nodeController struct { metadataLastUpdate map[string]time.Time ttl time.Duration - queue workqueue.DelayingInterface + queue workqueue.TypedDelayingInterface[any] } -func newNodeController(kubeclient kubernetes.Interface, client Client, informer v1informers.NodeInformer) *nodeController { - timeout := 300 +func newNodeController(kubeclient kubernetes.Interface, client client.Client, informer v1informers.NodeInformer, instanceCache *instances) *nodeController { + timeout := defaultMetadataTTL if raw, ok := os.LookupEnv("LINODE_METADATA_TTL"); ok { if t, _ := strconv.Atoi(raw); t > 0 { - timeout = t + timeout = time.Duration(t) * time.Second } } return &nodeController{ client: client, - instances: newInstances(client), + instances: instanceCache, kubeclient: kubeclient, informer: informer, - ttl: time.Duration(timeout) * time.Second, + ttl: timeout, metadataLastUpdate: make(map[string]time.Time), - queue: workqueue.NewDelayingQueue(), + queue: workqueue.NewTypedDelayingQueueWithConfig[any](workqueue.TypedDelayingQueueConfig[any]{Name: "ccm_node"}), } } func (s *nodeController) Run(stopCh <-chan struct{}) { - s.informer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ - AddFunc: func(obj interface{}) { - node, ok := obj.(*v1.Node) - if !ok 
{ - return - } - - klog.Infof("NodeController will handle newly created node (%s) metadata", node.Name) - s.queue.Add(node) + if _, err := s.informer.Informer().AddEventHandlerWithResyncPeriod( + cache.ResourceEventHandlerFuncs{ + AddFunc: func(obj interface{}) { + node, ok := obj.(*v1.Node) + if !ok { + return + } + + klog.Infof("NodeController will handle newly created node (%s) metadata", node.Name) + s.queue.Add(node) + }, + UpdateFunc: func(oldObj, newObj interface{}) { + node, ok := newObj.(*v1.Node) + if !ok { + return + } + + klog.Infof("NodeController will handle newly updated node (%s) metadata", node.Name) + s.queue.Add(node) + }, }, - }) + informerResyncPeriod, + ); err != nil { + klog.Errorf("NodeController can't handle newly created node's metadata. %s", err) + } go wait.Until(s.worker, time.Second, stopCh) s.informer.Informer().Run(stopCh) @@ -120,44 +142,72 @@ func (s *nodeController) SetLastMetadataUpdate(nodeName string) { } func (s *nodeController) handleNode(ctx context.Context, node *v1.Node) error { - klog.Infof("NodeController handling node (%s) metadata", node.Name) + klog.V(3).InfoS("NodeController handling node metadata", + "node", klog.KObj(node)) lastUpdate := s.LastMetadataUpdate(node.Name) - uuid, ok := node.Labels[annLinodeHostUUID] - if ok && time.Since(lastUpdate) < s.ttl { + uuid, foundLabel := node.Labels[annotations.AnnLinodeHostUUID] + configuredPrivateIP, foundAnnotation := node.Annotations[annotations.AnnLinodeNodePrivateIP] + + metaAge := time.Since(lastUpdate) + if foundLabel && foundAnnotation && metaAge < s.ttl { + klog.V(3).InfoS("Skipping refresh, ttl not reached", + "node", klog.KObj(node), + "ttl", s.ttl, + "metadata_age", metaAge, + ) return nil } linode, err := s.instances.lookupLinode(ctx, node) if err != nil { - klog.Infof("instance lookup error: %s", err.Error()) + klog.V(1).ErrorS(err, "Instance lookup error") return err } - if uuid == linode.HostUUID { + expectedPrivateIP := "" + // linode API response for linode will contain only one private ip + // if any private ip is configured. If it changes in future or linode + // supports other subnets with nodebalancer, this logic needs to be updated. 
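+ // For example, an instance reporting IPv4 addresses 45.79.10.10 and 192.168.159.135 would yield an expectedPrivateIP of 192.168.159.135 (hypothetical addresses, for illustration).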
+ // https://www.linode.com/docs/api/linode-instances/#linode-view + for _, addr := range linode.IPv4 { + if isPrivate(addr) { + expectedPrivateIP = addr.String() + break + } + } + + if uuid == linode.HostUUID && node.Spec.ProviderID != "" && configuredPrivateIP == expectedPrivateIP { s.SetLastMetadataUpdate(node.Name) return nil } if err := retry.RetryOnConflict(retry.DefaultRetry, func() error { - // Get a fresh copy of the node so the resource version is up to date + // Get a fresh copy of the node so the resource version is up-to-date n, err := s.kubeclient.CoreV1().Nodes().Get(ctx, node.Name, metav1.GetOptions{}) if err != nil { return err } - // It may be that the UUID has been set - if n.Labels[annLinodeHostUUID] == linode.HostUUID { - return nil + // Try to update the node UUID if it has not been set + if n.Labels[annotations.AnnLinodeHostUUID] != linode.HostUUID { + n.Labels[annotations.AnnLinodeHostUUID] = linode.HostUUID } - // Try to update the node - n.Labels[annLinodeHostUUID] = linode.HostUUID - _, err = s.kubeclient.CoreV1().Nodes().Update(ctx, node, metav1.UpdateOptions{}) + // Try to update the node ProviderID if it has not been set + if n.Spec.ProviderID == "" { + n.Spec.ProviderID = providerIDPrefix + strconv.Itoa(linode.ID) + } + + // Try to update the expectedPrivateIP if it's not set or doesn't match + if n.Annotations[annotations.AnnLinodeNodePrivateIP] != expectedPrivateIP && expectedPrivateIP != "" { + n.Annotations[annotations.AnnLinodeNodePrivateIP] = expectedPrivateIP + } + _, err = s.kubeclient.CoreV1().Nodes().Update(ctx, n, metav1.UpdateOptions{}) return err }); err != nil { - klog.Infof("node update error: %s", err.Error()) + klog.V(1).ErrorS(err, "Node update error") return err } diff --git a/cloud/linode/node_controller_test.go b/cloud/linode/node_controller_test.go new file mode 100644 index 00000000..409bc24d --- /dev/null +++ b/cloud/linode/node_controller_test.go @@ -0,0 +1,229 @@ +package linode + +import ( + "context" + "errors" + "net" + "net/http" + "testing" + "time" + + "github.com/golang/mock/gomock" + "github.com/linode/linode-cloud-controller-manager/cloud/annotations" + "github.com/linode/linode-cloud-controller-manager/cloud/linode/client/mocks" + "github.com/linode/linodego" + "github.com/stretchr/testify/assert" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/informers" + "k8s.io/client-go/kubernetes/fake" + "k8s.io/client-go/util/workqueue" +) + +func TestNodeController_Run(t *testing.T) { + // Mock dependencies + ctrl := gomock.NewController(t) + defer ctrl.Finish() + client := mocks.NewMockClient(ctrl) + kubeClient := fake.NewSimpleClientset() + informer := informers.NewSharedInformerFactory(kubeClient, 0).Core().V1().Nodes() + mockQueue := workqueue.NewTypedDelayingQueueWithConfig(workqueue.TypedDelayingQueueConfig[any]{Name: "test"}) + + nodeCtrl := newNodeController(kubeClient, client, informer, newInstances(client)) + nodeCtrl.queue = mockQueue + nodeCtrl.ttl = 1 * time.Second + + // Add test node + node := &v1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "nodeA", + Labels: map[string]string{}, + Annotations: map[string]string{}, + }, + Spec: v1.NodeSpec{}, + } + _, err := kubeClient.CoreV1().Nodes().Create(context.TODO(), node, metav1.CreateOptions{}) + assert.NoError(t, err, "expected no error during node creation") + + // Start the controller + stopCh := make(chan struct{}) + go nodeCtrl.Run(stopCh) + + client.EXPECT().ListInstances(gomock.Any(),
nil).AnyTimes().Return([]linodego.Instance{}, &linodego.Error{Code: http.StatusTooManyRequests, Message: "Too many requests"}) + // Add the node to the informer + err = nodeCtrl.informer.Informer().GetStore().Add(node) + assert.NoError(t, err, "expected no error when adding node to informer") + + // Allow some time for the queue to process + time.Sleep(1 * time.Second) + + // Stop the controller + close(stopCh) +} + +func TestNodeController_processNext(t *testing.T) { + // Mock dependencies + ctrl := gomock.NewController(t) + defer ctrl.Finish() + client := mocks.NewMockClient(ctrl) + kubeClient := fake.NewSimpleClientset() + queue := workqueue.NewTypedDelayingQueueWithConfig(workqueue.TypedDelayingQueueConfig[any]{Name: "testQueue"}) + node := &v1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Labels: map[string]string{}, + Annotations: map[string]string{}, + }, + Spec: v1.NodeSpec{}, + } + + _, err := kubeClient.CoreV1().Nodes().Create(context.TODO(), node, metav1.CreateOptions{}) + assert.NoError(t, err, "expected no error during node creation") + + controller := &nodeController{ + kubeclient: kubeClient, + instances: newInstances(client), + queue: queue, + metadataLastUpdate: make(map[string]time.Time), + ttl: defaultMetadataTTL, + } + + t.Run("should return no error on unknown errors", func(t *testing.T) { + queue.Add(node) + client.EXPECT().ListInstances(gomock.Any(), nil).Times(1).Return([]linodego.Instance{}, errors.New("lookup failed")) + result := controller.processNext() + assert.True(t, result, "processNext should return true") + if queue.Len() != 0 { + t.Errorf("expected queue to be empty, got %d items", queue.Len()) + } + }) + + t.Run("should return no error if node exists", func(t *testing.T) { + queue.Add(node) + publicIP := net.ParseIP("172.234.31.123") + privateIP := net.ParseIP("192.168.159.135") + client.EXPECT().ListInstances(gomock.Any(), nil).Times(1).Return([]linodego.Instance{ + {ID: 111, Label: "test", IPv4: []*net.IP{&publicIP, &privateIP}, HostUUID: "111"}, + }, nil) + result := controller.processNext() + assert.True(t, result, "processNext should return true") + if queue.Len() != 0 { + t.Errorf("expected queue to be empty, got %d items", queue.Len()) + } + }) + + t.Run("should return no error if queued object is not of type Node", func(t *testing.T) { + queue.Add("abc") + result := controller.processNext() + assert.True(t, result, "processNext should return true") + if queue.Len() != 0 { + t.Errorf("expected queue to be empty, got %d items", queue.Len()) + } + }) + + t.Run("should return no error if node in k8s doesn't exist", func(t *testing.T) { + queue.Add(node) + controller.kubeclient = fake.NewSimpleClientset() + defer func() { controller.kubeclient = kubeClient }() + result := controller.processNext() + assert.True(t, result, "processNext should return true") + if queue.Len() != 0 { + t.Errorf("expected queue to be empty, got %d items", queue.Len()) + } + }) + + t.Run("should return error and requeue when it gets 429 from linode API", func(t *testing.T) { + queue = workqueue.NewTypedDelayingQueueWithConfig(workqueue.TypedDelayingQueueConfig[any]{Name: "testQueue1"}) + queue.Add(node) + controller.queue = queue + client := mocks.NewMockClient(ctrl) + controller.instances = newInstances(client) + retryInterval = 1 * time.Nanosecond + client.EXPECT().ListInstances(gomock.Any(), nil).Times(1).Return([]linodego.Instance{}, &linodego.Error{Code: http.StatusTooManyRequests, Message: "Too many requests"}) + result := controller.processNext() + 
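// A 429 makes processNext re-add the node through the delaying queue; with retryInterval shrunk to a nanosecond above, the sleep below lets that delayed re-add land before the queue length is asserted. +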
time.Sleep(1 * time.Second) + assert.True(t, result, "processNext should return true") + if queue.Len() == 0 { + t.Errorf("expected queue to not be empty, got it empty") + } + }) + + t.Run("should return error and requeue when it gets error >= 500 from linode API", func(t *testing.T) { + queue = workqueue.NewTypedDelayingQueueWithConfig(workqueue.TypedDelayingQueueConfig[any]{Name: "testQueue2"}) + queue.Add(node) + controller.queue = queue + client := mocks.NewMockClient(ctrl) + controller.instances = newInstances(client) + retryInterval = 1 * time.Nanosecond + client.EXPECT().ListInstances(gomock.Any(), nil).Times(1).Return([]linodego.Instance{}, &linodego.Error{Code: http.StatusInternalServerError, Message: "Internal server error"}) + result := controller.processNext() + time.Sleep(1 * time.Second) + assert.True(t, result, "processNext should return true") + if queue.Len() == 0 { + t.Errorf("expected queue to not be empty, got it empty") + } + }) +} + +func TestNodeController_handleNode(t *testing.T) { + // Mock dependencies + ctrl := gomock.NewController(t) + defer ctrl.Finish() + client := mocks.NewMockClient(ctrl) + kubeClient := fake.NewSimpleClientset() + node := &v1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-node", + Labels: map[string]string{}, + Annotations: map[string]string{}, + }, + Spec: v1.NodeSpec{ProviderID: "linode://123"}, + } + _, err := kubeClient.CoreV1().Nodes().Create(context.TODO(), node, metav1.CreateOptions{}) + assert.NoError(t, err, "expected no error during node creation") + + instCache := newInstances(client) + + t.Setenv("LINODE_METADATA_TTL", "30") + nodeCtrl := newNodeController(kubeClient, client, nil, instCache) + assert.Equal(t, 30*time.Second, nodeCtrl.ttl, "expected ttl to be 30 seconds") + + // Test: Successful metadata update + publicIP := net.ParseIP("172.234.31.123") + privateIP := net.ParseIP("192.168.159.135") + client.EXPECT().ListInstances(gomock.Any(), nil).Times(1).Return([]linodego.Instance{ + {ID: 123, Label: "test-node", IPv4: []*net.IP{&publicIP, &privateIP}, HostUUID: "123"}, + }, nil) + err = nodeCtrl.handleNode(context.TODO(), node) + assert.NoError(t, err, "expected no error during handleNode") + + // Check metadataLastUpdate + lastUpdate := nodeCtrl.LastMetadataUpdate("test-node") + if time.Since(lastUpdate) > 5*time.Second { + t.Errorf("metadataLastUpdate was not updated correctly") + } + + // Annotations set, no update needed as ttl not reached + node.Labels[annotations.AnnLinodeHostUUID] = "123" + node.Annotations[annotations.AnnLinodeNodePrivateIP] = privateIP.String() + err = nodeCtrl.handleNode(context.TODO(), node) + assert.NoError(t, err, "expected no error during handleNode") + + // Lookup failure for linode instance + client = mocks.NewMockClient(ctrl) + nodeCtrl.instances = newInstances(client) + nodeCtrl.metadataLastUpdate["test-node"] = time.Now().Add(-2 * nodeCtrl.ttl) + client.EXPECT().ListInstances(gomock.Any(), nil).Times(1).Return([]linodego.Instance{}, errors.New("lookup failed")) + err = nodeCtrl.handleNode(context.TODO(), node) + assert.Error(t, err, "expected error during handleNode, got nil") + + // All fields already set + client = mocks.NewMockClient(ctrl) + nodeCtrl.instances = newInstances(client) + nodeCtrl.metadataLastUpdate["test-node"] = time.Now().Add(-2 * nodeCtrl.ttl) + client.EXPECT().ListInstances(gomock.Any(), nil).Times(1).Return([]linodego.Instance{ + {ID: 123, Label: "test-node", IPv4: []*net.IP{&publicIP, &privateIP}, HostUUID: "123"}, + }, nil) + err =
nodeCtrl.handleNode(context.TODO(), node) + assert.NoError(t, err, "expected no error during handleNode") +} diff --git a/cloud/linode/route_controller.go b/cloud/linode/route_controller.go new file mode 100644 index 00000000..b1aa112b --- /dev/null +++ b/cloud/linode/route_controller.go @@ -0,0 +1,262 @@ +package linode + +import ( + "context" + "fmt" + "os" + "strconv" + "strings" + "sync" + "time" + + "github.com/linode/linodego" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + cloudprovider "k8s.io/cloud-provider" + "k8s.io/klog/v2" + + "github.com/linode/linode-cloud-controller-manager/cloud/linode/client" +) + +type routeCache struct { + Mu sync.RWMutex + routes map[int][]linodego.VPCIP + lastUpdate time.Time + ttl time.Duration +} + +// RefreshCache checks if cache has expired and updates it accordingly +func (rc *routeCache) refreshRoutes(ctx context.Context, client client.Client) { + rc.Mu.Lock() + defer rc.Mu.Unlock() + + if time.Since(rc.lastUpdate) < rc.ttl { + return + } + + vpcNodes := map[int][]linodego.VPCIP{} + vpcNames := strings.Split(Options.VPCNames, ",") + for _, v := range vpcNames { + vpcName := strings.TrimSpace(v) + if vpcName == "" { + continue + } + resp, err := GetVPCIPAddresses(ctx, client, vpcName) + if err != nil { + klog.Errorf("failed updating cache for VPC %s. Error: %s", vpcName, err.Error()) + continue + } + for _, r := range resp { + vpcNodes[r.LinodeID] = append(vpcNodes[r.LinodeID], r) + } + } + + rc.routes = vpcNodes + rc.lastUpdate = time.Now() +} + +type routes struct { + client client.Client + instances *instances + routeCache *routeCache +} + +func newRoutes(client client.Client, instanceCache *instances) (cloudprovider.Routes, error) { + timeout := 60 + if raw, ok := os.LookupEnv("LINODE_ROUTES_CACHE_TTL_SECONDS"); ok { + if t, _ := strconv.Atoi(raw); t > 0 { + timeout = t + } + } + klog.V(3).Infof("TTL for routeCache set to %d seconds", timeout) + + if Options.EnableRouteController && Options.VPCNames == "" { + return nil, fmt.Errorf("cannot enable route controller as vpc-names is empty") + } + + return &routes{ + client: client, + instances: instanceCache, + routeCache: &routeCache{ + routes: make(map[int][]linodego.VPCIP, 0), + ttl: time.Duration(timeout) * time.Second, + }, + }, nil +} + +// instanceRoutesByID returns routes for given instance id +func (r *routes) instanceRoutesByID(id int) ([]linodego.VPCIP, error) { + r.routeCache.Mu.RLock() + defer r.routeCache.Mu.RUnlock() + instanceRoutes, ok := r.routeCache.routes[id] + if !ok { + return nil, fmt.Errorf("no routes found for instance %d", id) + } + return instanceRoutes, nil +} + +// getInstanceRoutes returns routes for given instance id +// It refreshes routeCache if it has expired +func (r *routes) getInstanceRoutes(ctx context.Context, id int) ([]linodego.VPCIP, error) { + r.routeCache.refreshRoutes(ctx, r.client) + return r.instanceRoutesByID(id) +} + +// getInstanceFromName returns linode instance with given name if it exists +func (r *routes) getInstanceFromName(ctx context.Context, name string) (*linodego.Instance, error) { + // create node object + node := &v1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + }, + } + + // fetch instance with specified node name + instance, err := r.instances.lookupLinode(ctx, node) + if err != nil { + klog.Errorf("failed getting linode %s", name) + return nil, err + } + return instance, nil +} + +// CreateRoute adds route's subnet to ip_ranges of target node's VPC interface 
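+// For example, a route whose DestinationCIDR is 10.192.0.0/24 is appended to the interface's existing ip_ranges and pushed via UpdateInstanceConfigInterface.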
+func (r *routes) CreateRoute(ctx context.Context, clusterName string, nameHint string, route *cloudprovider.Route) error { + instance, err := r.getInstanceFromName(ctx, string(route.TargetNode)) + if err != nil { + return err + } + + // fetch instance routes + instanceRoutes, err := r.getInstanceRoutes(ctx, instance.ID) + if err != nil { + return err + } + + // check already configured routes + intfRoutes := []string{} + intfVPCIP := linodego.VPCIP{} + + for _, vpcid := range GetAllVPCIDs() { + for _, ir := range instanceRoutes { + if ir.VPCID != vpcid { + continue + } + + if ir.Address != nil { + intfVPCIP = ir + continue + } + + if ir.AddressRange != nil && *ir.AddressRange == route.DestinationCIDR { + klog.V(4).Infof("Route already exists for node %s", route.TargetNode) + return nil + } + + intfRoutes = append(intfRoutes, *ir.AddressRange) + } + } + + if intfVPCIP.Address == nil { + return fmt.Errorf("unable to add route %s for node %s. no valid interface found", route.DestinationCIDR, route.TargetNode) + } + + intfRoutes = append(intfRoutes, route.DestinationCIDR) + interfaceUpdateOptions := linodego.InstanceConfigInterfaceUpdateOptions{ + IPRanges: &intfRoutes, + } + + resp, err := r.client.UpdateInstanceConfigInterface(ctx, instance.ID, intfVPCIP.ConfigID, intfVPCIP.InterfaceID, interfaceUpdateOptions) + if err != nil { + return err + } + klog.V(4).Infof("Added routes for node %s. Current routes: %v", route.TargetNode, resp.IPRanges) + return nil +} + +// DeleteRoute removes route's subnet from ip_ranges of target node's VPC interface +func (r *routes) DeleteRoute(ctx context.Context, clusterName string, route *cloudprovider.Route) error { + instance, err := r.getInstanceFromName(ctx, string(route.TargetNode)) + if err != nil { + return err + } + + instanceRoutes, err := r.getInstanceRoutes(ctx, instance.ID) + if err != nil { + return err + } + + // check already configured routes + intfRoutes := []string{} + intfVPCIP := linodego.VPCIP{} + + for _, vpcid := range GetAllVPCIDs() { + for _, ir := range instanceRoutes { + if ir.VPCID != vpcid { + continue + } + + if ir.Address != nil { + intfVPCIP = ir + continue + } + + if ir.AddressRange != nil && *ir.AddressRange == route.DestinationCIDR { + continue + } + + intfRoutes = append(intfRoutes, *ir.AddressRange) + } + } + + if intfVPCIP.Address == nil { + return fmt.Errorf("unable to remove route %s for node %s. no valid interface found", route.DestinationCIDR, route.TargetNode) + } + + interfaceUpdateOptions := linodego.InstanceConfigInterfaceUpdateOptions{ + IPRanges: &intfRoutes, + } + resp, err := r.client.UpdateInstanceConfigInterface(ctx, instance.ID, intfVPCIP.ConfigID, intfVPCIP.InterfaceID, interfaceUpdateOptions) + if err != nil { + return err + } + klog.V(4).Infof("Deleted route for node %s. Current routes: %v", route.TargetNode, resp.IPRanges) + return nil +} + +// ListRoutes fetches routes configured on all instances which have VPC interfaces +func (r *routes) ListRoutes(ctx context.Context, clusterName string) ([]*cloudprovider.Route, error) { + klog.V(4).Infof("Fetching routes configured on the cluster") + instances, err := r.instances.listAllInstances(ctx) + if err != nil { + return nil, err + } + + var configuredRoutes []*cloudprovider.Route + for _, instance := range instances { + instanceRoutes, err := r.getInstanceRoutes(ctx, instance.ID) + if err != nil { + klog.Errorf("Failed finding routes for instance id %d. 
Error: %v", instance.ID, err) + continue + } + + // check for configured routes + for _, vpcid := range GetAllVPCIDs() { + for _, ir := range instanceRoutes { + if ir.Address != nil || ir.VPCID != vpcid { + continue + } + + if ir.AddressRange != nil { + route := &cloudprovider.Route{ + TargetNode: types.NodeName(instance.Label), + DestinationCIDR: *ir.AddressRange, + } + configuredRoutes = append(configuredRoutes, route) + } + } + } + } + return configuredRoutes, nil +} diff --git a/cloud/linode/route_controller_test.go b/cloud/linode/route_controller_test.go new file mode 100644 index 00000000..e6f2bff0 --- /dev/null +++ b/cloud/linode/route_controller_test.go @@ -0,0 +1,455 @@ +package linode + +import ( + "context" + "net" + "testing" + + "github.com/golang/mock/gomock" + "github.com/linode/linodego" + "github.com/stretchr/testify/assert" + "k8s.io/apimachinery/pkg/types" + cloudprovider "k8s.io/cloud-provider" + "k8s.io/utils/ptr" + + "github.com/linode/linode-cloud-controller-manager/cloud/linode/client/mocks" +) + +func TestListRoutes(t *testing.T) { + Options.VPCNames = "test,abc" + vpcIDs["test"] = 1 + vpcIDs["abc"] = 2 + Options.EnableRouteController = true + + nodeID := 123 + name := "mock-instance" + publicIPv4 := net.ParseIP("45.76.101.25") + privateIPv4 := net.ParseIP("192.168.133.65") + linodeType := "g6-standard-1" + region := "us-east" + + t.Run("should return empty if no instance exists in cluster", func(t *testing.T) { + ctx := context.Background() + ctrl := gomock.NewController(t) + defer ctrl.Finish() + client := mocks.NewMockClient(ctrl) + instanceCache := newInstances(client) + routeController, err := newRoutes(client, instanceCache) + assert.NoError(t, err) + + client.EXPECT().ListInstances(gomock.Any(), gomock.Any()).Times(1).Return([]linodego.Instance{}, nil) + client.EXPECT().ListVPCIPAddresses(gomock.Any(), gomock.Any(), gomock.Any()).Times(2).Return([]linodego.VPCIP{}, nil) + routes, err := routeController.ListRoutes(ctx, "test") + assert.NoError(t, err) + assert.Empty(t, routes) + }) + + validInstance := linodego.Instance{ + ID: nodeID, + Label: name, + Type: linodeType, + Region: region, + IPv4: []*net.IP{&publicIPv4, &privateIPv4}, + } + + t.Run("should return no routes if instance exists but is not connected to VPC", func(t *testing.T) { + ctx := context.Background() + ctrl := gomock.NewController(t) + defer ctrl.Finish() + client := mocks.NewMockClient(ctrl) + instanceCache := newInstances(client) + routeController, err := newRoutes(client, instanceCache) + assert.NoError(t, err) + + client.EXPECT().ListInstances(gomock.Any(), nil).Times(1).Return([]linodego.Instance{validInstance}, nil) + client.EXPECT().ListVPCIPAddresses(gomock.Any(), gomock.Any(), gomock.Any()).Times(2).Return([]linodego.VPCIP{}, nil) + routes, err := routeController.ListRoutes(ctx, "test") + assert.NoError(t, err) + assert.Empty(t, routes) + }) + + vpcIP := "10.0.0.2" + noRoutesInVPC := []linodego.VPCIP{ + { + Address: &vpcIP, + AddressRange: nil, + VPCID: vpcIDs["test"], + NAT1To1: nil, + LinodeID: nodeID, + }, + } + + t.Run("should return no routes if instance exists, connected to VPC but no ip_ranges configured", func(t *testing.T) { + ctx := context.Background() + ctrl := gomock.NewController(t) + defer ctrl.Finish() + client := mocks.NewMockClient(ctrl) + instanceCache := newInstances(client) + routeController, err := newRoutes(client, instanceCache) + assert.NoError(t, err) + + client.EXPECT().ListInstances(gomock.Any(), nil).Times(1).Return([]linodego.Instance{validInstance}, 
nil) + client.EXPECT().ListVPCIPAddresses(gomock.Any(), gomock.Any(), gomock.Any()).Times(4).Return(noRoutesInVPC, nil) + routes, err := routeController.ListRoutes(ctx, "test") + assert.NoError(t, err) + assert.Empty(t, routes) + }) + + addressRange1 := "10.192.0.0/24" + addressRange2 := "10.192.10.0/24" + routesInVPC := []linodego.VPCIP{ + { + Address: &vpcIP, + AddressRange: nil, + VPCID: vpcIDs["test"], + NAT1To1: nil, + LinodeID: nodeID, + }, + { + Address: nil, + AddressRange: &addressRange1, + VPCID: vpcIDs["test"], + NAT1To1: nil, + LinodeID: nodeID, + }, + { + Address: nil, + AddressRange: &addressRange2, + VPCID: vpcIDs["test"], + NAT1To1: nil, + LinodeID: nodeID, + }, + } + + t.Run("should return routes if instance exists, connected to VPC and ip_ranges configured", func(t *testing.T) { + ctx := context.Background() + ctrl := gomock.NewController(t) + defer ctrl.Finish() + client := mocks.NewMockClient(ctrl) + instanceCache := newInstances(client) + routeController, err := newRoutes(client, instanceCache) + assert.NoError(t, err) + + client.EXPECT().ListInstances(gomock.Any(), nil).Times(1).Return([]linodego.Instance{validInstance}, nil) + client.EXPECT().ListVPCIPAddresses(gomock.Any(), gomock.Any(), gomock.Any()).Times(4).Return(routesInVPC, nil) + routes, err := routeController.ListRoutes(ctx, "test") + assert.NoError(t, err) + assert.NotEmpty(t, routes) + assert.Equal(t, addressRange1, routes[0].DestinationCIDR) + assert.Equal(t, addressRange2, routes[1].DestinationCIDR) + }) + + routesInDifferentVPC := []linodego.VPCIP{ + { + Address: &vpcIP, + AddressRange: nil, + VPCID: 100, + NAT1To1: nil, + LinodeID: nodeID, + }, + { + Address: nil, + AddressRange: &addressRange1, + VPCID: 100, + NAT1To1: nil, + LinodeID: nodeID, + }, + { + Address: nil, + AddressRange: &addressRange2, + VPCID: 100, + NAT1To1: nil, + LinodeID: nodeID, + }, + } + + t.Run("should return no routes if instance exists, connected to VPC and ip_ranges configured but vpc id doesn't match", func(t *testing.T) { + ctx := context.Background() + ctrl := gomock.NewController(t) + defer ctrl.Finish() + client := mocks.NewMockClient(ctrl) + instanceCache := newInstances(client) + routeController, err := newRoutes(client, instanceCache) + assert.NoError(t, err) + + client.EXPECT().ListInstances(gomock.Any(), nil).Times(1).Return([]linodego.Instance{validInstance}, nil) + client.EXPECT().ListVPCIPAddresses(gomock.Any(), gomock.Any(), gomock.Any()).Times(4).Return(routesInDifferentVPC, nil) + routes, err := routeController.ListRoutes(ctx, "test") + assert.NoError(t, err) + assert.Empty(t, routes) + }) + + t.Run("should return routes if multiple instances exists, connected to VPCs and ip_ranges configured", func(t *testing.T) { + ctx := context.Background() + ctrl := gomock.NewController(t) + defer ctrl.Finish() + client := mocks.NewMockClient(ctrl) + instanceCache := newInstances(client) + routeController, err := newRoutes(client, instanceCache) + assert.NoError(t, err) + + vpcIP2 := "10.0.0.3" + addressRange3 := "10.192.40.0/24" + addressRange4 := "10.192.50.0/24" + + validInstance2 := linodego.Instance{ + ID: 124, + Label: "mock-instance2", + Type: linodeType, + Region: region, + IPv4: []*net.IP{&publicIPv4, &privateIPv4}, + } + + routesInVPC2 := []linodego.VPCIP{ + { + Address: &vpcIP2, + AddressRange: nil, + VPCID: vpcIDs["abc"], + NAT1To1: nil, + LinodeID: 124, + }, + { + Address: nil, + AddressRange: &addressRange3, + VPCID: vpcIDs["abc"], + NAT1To1: nil, + LinodeID: 124, + }, + { + Address: nil, + AddressRange: 
&addressRange4, + VPCID: vpcIDs["abc"], + NAT1To1: nil, + LinodeID: 124, + }, + } + + client.EXPECT().ListInstances(gomock.Any(), nil).Times(1).Return([]linodego.Instance{validInstance, validInstance2}, nil) + c1 := client.EXPECT().ListVPCIPAddresses(gomock.Any(), gomock.Any(), gomock.Any()).Times(1).Return(routesInVPC, nil) + c2 := client.EXPECT().ListVPCIPAddresses(gomock.Any(), gomock.Any(), gomock.Any()).After(c1).Times(1).Return(routesInVPC2, nil) + c3 := client.EXPECT().ListVPCIPAddresses(gomock.Any(), gomock.Any(), gomock.Any()).After(c2).Times(1).Return(routesInVPC, nil) + client.EXPECT().ListVPCIPAddresses(gomock.Any(), gomock.Any(), gomock.Any()).After(c3).Times(1).Return(routesInVPC2, nil) + routes, err := routeController.ListRoutes(ctx, "test") + assert.NoError(t, err) + assert.NotEmpty(t, routes) + cidrs := make([]string, len(routes)) + for i, value := range routes { + cidrs[i] = value.DestinationCIDR + } + assert.Contains(t, cidrs, addressRange1) + assert.Contains(t, cidrs, addressRange2) + assert.Contains(t, cidrs, addressRange3) + assert.Contains(t, cidrs, addressRange4) + }) +} + +func TestCreateRoute(t *testing.T) { + ctx := context.Background() + Options.VPCNames = "dummy" + vpcIDs["dummy"] = 1 + Options.EnableRouteController = true + + nodeID := 123 + name := "mock-instance" + publicIPv4 := net.ParseIP("45.76.101.25") + privateIPv4 := net.ParseIP("192.168.133.65") + linodeType := "g6-standard-1" + region := "us-east" + validInstance := linodego.Instance{ + ID: nodeID, + Label: name, + Type: linodeType, + Region: region, + IPv4: []*net.IP{&publicIPv4, &privateIPv4}, + } + + vpcIP := "10.0.0.2" + noRoutesInVPC := []linodego.VPCIP{ + { + Address: &vpcIP, + AddressRange: nil, + VPCID: vpcIDs["dummy"], + NAT1To1: nil, + LinodeID: nodeID, + }, + } + + instanceConfigIntfWithVPCAndRoute := linodego.InstanceConfigInterface{ + VPCID: ptr.To(vpcIDs["dummy"]), + IPv4: &linodego.VPCIPv4{VPC: vpcIP}, + IPRanges: []string{"10.10.10.0/24"}, + } + route := &cloudprovider.Route{ + Name: "route1", + TargetNode: types.NodeName(name), + DestinationCIDR: "10.10.10.0/24", + } + + t.Run("should return no error if instance exists, connected to VPC we add a route", func(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + client := mocks.NewMockClient(ctrl) + instanceCache := newInstances(client) + routeController, err := newRoutes(client, instanceCache) + assert.NoError(t, err) + + client.EXPECT().ListInstances(gomock.Any(), nil).Times(1).Return([]linodego.Instance{validInstance}, nil) + client.EXPECT().ListVPCIPAddresses(gomock.Any(), gomock.Any(), gomock.Any()).Times(2).Return(noRoutesInVPC, nil) + client.EXPECT().UpdateInstanceConfigInterface(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Times(1).Return(&instanceConfigIntfWithVPCAndRoute, nil) + err = routeController.CreateRoute(ctx, "dummy", "dummy", route) + assert.NoError(t, err) + }) + + addressRange1 := "10.10.10.0/24" + routesInVPC := []linodego.VPCIP{ + { + Address: &vpcIP, + AddressRange: nil, + VPCID: vpcIDs["dummy"], + NAT1To1: nil, + LinodeID: nodeID, + }, + { + Address: nil, + AddressRange: &addressRange1, + VPCID: vpcIDs["dummy"], + NAT1To1: nil, + LinodeID: nodeID, + }, + } + + t.Run("should return no error if instance exists, connected to VPC and route already exists", func(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + client := mocks.NewMockClient(ctrl) + instanceCache := newInstances(client) + routeController, err := newRoutes(client, instanceCache) + 
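// newRoutes fails fast when the route controller is enabled without any vpc-names, so a nil error here confirms the options configured above are valid. +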
assert.NoError(t, err) + + client.EXPECT().ListInstances(gomock.Any(), nil).Times(1).Return([]linodego.Instance{validInstance}, nil) + client.EXPECT().ListVPCIPAddresses(gomock.Any(), gomock.Any(), gomock.Any()).Times(2).Return(routesInVPC, nil) + err = routeController.CreateRoute(ctx, "dummy", "dummy", route) + assert.NoError(t, err) + }) + + t.Run("should return error if instance doesn't exist", func(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + client := mocks.NewMockClient(ctrl) + instanceCache := newInstances(client) + routeController, err := newRoutes(client, instanceCache) + assert.NoError(t, err) + + client.EXPECT().ListInstances(gomock.Any(), nil).Times(1).Return([]linodego.Instance{}, nil) + client.EXPECT().ListVPCIPAddresses(gomock.Any(), gomock.Any(), gomock.Any()).Times(1).Return([]linodego.VPCIP{}, nil) + err = routeController.CreateRoute(ctx, "dummy", "dummy", route) + assert.Error(t, err) + }) +} + +func TestDeleteRoute(t *testing.T) { + Options.VPCNames = "dummy" + vpcIDs["dummy"] = 1 + Options.EnableRouteController = true + + ctx := context.Background() + + nodeID := 123 + name := "mock-instance" + publicIPv4 := net.ParseIP("45.76.101.25") + privateIPv4 := net.ParseIP("192.168.133.65") + linodeType := "g6-standard-1" + region := "us-east" + validInstance := linodego.Instance{ + ID: nodeID, + Label: name, + Type: linodeType, + Region: region, + IPv4: []*net.IP{&publicIPv4, &privateIPv4}, + } + + vpcIP := "10.0.0.2" + route := &cloudprovider.Route{ + Name: "route1", + TargetNode: types.NodeName(name), + DestinationCIDR: "10.10.10.0/24", + } + + t.Run("should return error if instance doesn't exist", func(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + client := mocks.NewMockClient(ctrl) + instanceCache := newInstances(client) + routeController, err := newRoutes(client, instanceCache) + assert.NoError(t, err) + + client.EXPECT().ListInstances(gomock.Any(), nil).Times(1).Return([]linodego.Instance{}, nil) + client.EXPECT().ListVPCIPAddresses(gomock.Any(), gomock.Any(), gomock.Any()).Times(1).Return([]linodego.VPCIP{}, nil) + err = routeController.DeleteRoute(ctx, "dummy", route) + assert.Error(t, err) + }) + + addressRange1 := "10.10.10.0/24" + noRoutesInVPC := []linodego.VPCIP{ + { + Address: &vpcIP, + AddressRange: nil, + VPCID: vpcIDs["dummy"], + NAT1To1: nil, + LinodeID: nodeID, + }, + } + + instanceConfigIntfWithVPCAndNoRoute := linodego.InstanceConfigInterface{ + VPCID: ptr.To(vpcIDs["dummy"]), + IPv4: &linodego.VPCIPv4{VPC: vpcIP}, + IPRanges: []string{}, + } + + t.Run("should return no error if instance exists, connected to VPC, route doesn't exist and we try to delete route", func(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + client := mocks.NewMockClient(ctrl) + instanceCache := newInstances(client) + routeController, err := newRoutes(client, instanceCache) + assert.NoError(t, err) + + client.EXPECT().ListInstances(gomock.Any(), nil).Times(1).Return([]linodego.Instance{validInstance}, nil) + client.EXPECT().ListVPCIPAddresses(gomock.Any(), gomock.Any(), gomock.Any()).Times(2).Return(noRoutesInVPC, nil) + client.EXPECT().UpdateInstanceConfigInterface(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Times(1).Return(&instanceConfigIntfWithVPCAndNoRoute, nil) + err = routeController.DeleteRoute(ctx, "dummy", route) + assert.NoError(t, err) + }) + + routesInVPC := []linodego.VPCIP{ + { + Address: &vpcIP, + AddressRange: nil, + VPCID: vpcIDs["dummy"], + NAT1To1: nil, + 
LinodeID: nodeID, + }, + { + Address: nil, + AddressRange: &addressRange1, + VPCID: vpcIDs["dummy"], + NAT1To1: nil, + LinodeID: nodeID, + }, + } + + t.Run("should return no error if instance exists, connected to VPC and route is deleted", func(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + client := mocks.NewMockClient(ctrl) + instanceCache := newInstances(client) + routeController, err := newRoutes(client, instanceCache) + assert.NoError(t, err) + + client.EXPECT().ListInstances(gomock.Any(), nil).Times(1).Return([]linodego.Instance{validInstance}, nil) + client.EXPECT().ListVPCIPAddresses(gomock.Any(), gomock.Any(), gomock.Any()).Times(2).Return(routesInVPC, nil) + client.EXPECT().UpdateInstanceConfigInterface(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Times(1).Return(&instanceConfigIntfWithVPCAndNoRoute, nil) + err = routeController.DeleteRoute(ctx, "dummy", route) + assert.NoError(t, err) + }) +} diff --git a/cloud/linode/service_controller.go b/cloud/linode/service_controller.go index 3c4364cd..684cac7e 100644 --- a/cloud/linode/service_controller.go +++ b/cloud/linode/service_controller.go @@ -3,6 +3,7 @@ package linode import ( "context" "net/http" + "strings" "time" "github.com/appscode/go/wait" @@ -14,25 +15,25 @@ import ( "k8s.io/klog/v2" ) -const retryInterval = time.Minute * 1 +var retryInterval = time.Minute * 1 type serviceController struct { loadbalancers *loadbalancers informer v1informers.ServiceInformer - queue workqueue.DelayingInterface + queue workqueue.TypedDelayingInterface[any] } func newServiceController(loadbalancers *loadbalancers, informer v1informers.ServiceInformer) *serviceController { return &serviceController{ loadbalancers: loadbalancers, informer: informer, - queue: workqueue.NewDelayingQueue(), + queue: workqueue.NewTypedDelayingQueue[any](), } } func (s *serviceController) Run(stopCh <-chan struct{}) { - s.informer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ + if _, err := s.informer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ DeleteFunc: func(obj interface{}) { service, ok := obj.(*v1.Service) if !ok { @@ -46,7 +47,24 @@ func (s *serviceController) Run(stopCh <-chan struct{}) { klog.Infof("ServiceController will handle service (%s) deletion", getServiceNn(service)) s.queue.Add(service) }, - }) + UpdateFunc: func(oldObj, newObj interface{}) { + newSvc, ok := newObj.(*v1.Service) + if !ok { + return + } + oldSvc, ok := oldObj.(*v1.Service) + if !ok { + return + } + + if newSvc.Spec.Type != "LoadBalancer" && oldSvc.Spec.Type == "LoadBalancer" { + klog.Infof("ServiceController will handle service (%s) LoadBalancer deletion", getServiceNn(oldSvc)) + s.queue.Add(oldSvc) + } + }, + }); err != nil { + klog.Errorf("ServiceController failed to register its informer: %s", err) + } go wait.Until(s.worker, time.Second, stopCh) s.informer.Informer().Run(stopCh) @@ -91,5 +109,6 @@ func (s *serviceController) processNextDeletion() bool { func (s *serviceController) handleServiceDeleted(service *v1.Service) error { klog.Infof("ServiceController handling service (%s) deletion", getServiceNn(service)) - return s.loadbalancers.EnsureLoadBalancerDeleted(context.Background(), service.ClusterName, service) + clusterName := strings.TrimPrefix(service.Namespace, "kube-system-") + return s.loadbalancers.EnsureLoadBalancerDeleted(context.Background(), clusterName, service) } diff --git a/cloud/linode/service_controller_test.go b/cloud/linode/service_controller_test.go new file mode
100644 index 00000000..8d90d9ea --- /dev/null +++ b/cloud/linode/service_controller_test.go @@ -0,0 +1,111 @@ +package linode + +import ( + "context" + "testing" + "time" + + "github.com/golang/mock/gomock" + "github.com/linode/linode-cloud-controller-manager/cloud/linode/client/mocks" + "github.com/stretchr/testify/assert" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/informers" + "k8s.io/client-go/kubernetes/fake" + "k8s.io/client-go/util/workqueue" +) + +func Test_serviceController_Run(t *testing.T) { + // Mock dependencies + ctrl := gomock.NewController(t) + defer ctrl.Finish() + client := mocks.NewMockClient(ctrl) + kubeClient := fake.NewSimpleClientset() + informer := informers.NewSharedInformerFactory(kubeClient, 0).Core().V1().Services() + mockQueue := workqueue.NewTypedDelayingQueueWithConfig(workqueue.TypedDelayingQueueConfig[any]{Name: "test"}) + + loadbalancers := newLoadbalancers(client, "us-east").(*loadbalancers) + svcCtrl := newServiceController(loadbalancers, informer) + svcCtrl.queue = mockQueue + + svc := createTestService() + svc.Spec.Type = "LoadBalancer" + _, err := kubeClient.CoreV1().Services("test-ns").Create(context.TODO(), svc, metav1.CreateOptions{}) + assert.NoError(t, err, "expected no error during svc creation") + + // Start the controller + stopCh := make(chan struct{}) + go svcCtrl.Run(stopCh) + + // Add svc to the informer + err = svcCtrl.informer.Informer().GetStore().Add(svc) + assert.NoError(t, err, "expected no error when adding svc to informer") + + // Allow some time for the queue to process + time.Sleep(1 * time.Second) + + // Stop the controller + close(stopCh) +} + +func Test_serviceController_processNextDeletion(t *testing.T) { + type fields struct { + loadbalancers *loadbalancers + queue workqueue.TypedDelayingInterface[any] + Client *mocks.MockClient + } + tests := []struct { + name string + fields fields + Setup func(*fields) + want bool + queueLen int + }{ + { + name: "Invalid service type", + fields: fields{ + loadbalancers: nil, + }, + Setup: func(f *fields) { + f.loadbalancers = &loadbalancers{client: f.Client, zone: "test", loadBalancerType: Options.LoadBalancerType} + f.queue = workqueue.NewTypedDelayingQueueWithConfig(workqueue.TypedDelayingQueueConfig[any]{Name: "testQueue"}) + f.queue.Add("test") + }, + want: true, + queueLen: 0, + }, + { + name: "Valid service type", + fields: fields{ + loadbalancers: nil, + }, + Setup: func(f *fields) { + f.loadbalancers = &loadbalancers{client: f.Client, zone: "test", loadBalancerType: Options.LoadBalancerType} + f.queue = workqueue.NewTypedDelayingQueueWithConfig(workqueue.TypedDelayingQueueConfig[any]{Name: "testQueue"}) + svc := createTestService() + f.queue.Add(svc) + }, + want: true, + queueLen: 0, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + s := &serviceController{ + loadbalancers: tt.fields.loadbalancers, + queue: tt.fields.queue, + } + ctrl := gomock.NewController(t) + defer ctrl.Finish() + client := mocks.NewMockClient(ctrl) + tt.fields.Client = client + tt.Setup(&tt.fields) + s.loadbalancers = tt.fields.loadbalancers + s.queue = tt.fields.queue + s.loadbalancers.client = tt.fields.Client + if got := s.processNextDeletion(); got != tt.want { + t.Errorf("serviceController.processNextDeletion() = %v, want %v", got, tt.want) + } + assert.Equal(t, tt.queueLen, tt.fields.queue.Len()) + }) + } +} diff --git a/cloud/linode/tools.go b/cloud/linode/tools.go new file mode 100644 index 00000000..3b2e6efe --- /dev/null +++ b/cloud/linode/tools.go 
@@ -0,0 +1,8 @@ +//go:build tools +// +build tools + +package tools + +import ( + _ "github.com/golang/mock/mockgen" +) diff --git a/cloud/linode/vpc.go b/cloud/linode/vpc.go new file mode 100644 index 00000000..01c1ed14 --- /dev/null +++ b/cloud/linode/vpc.go @@ -0,0 +1,79 @@ +package linode + +import ( + "context" + "fmt" + "net/http" + "strings" + "sync" + + "github.com/linode/linode-cloud-controller-manager/cloud/linode/client" + "github.com/linode/linodego" + "k8s.io/klog/v2" +) + +var ( + Mu sync.RWMutex + // vpcIDs maps VPC labels to their VPC IDs + vpcIDs = make(map[string]int) +) + +type vpcLookupError struct { + value string +} + +func (e vpcLookupError) Error() string { + return fmt.Sprintf("failed to find VPC: %q", e.value) +} + +// GetAllVPCIDs returns the VPC IDs stored in the map +func GetAllVPCIDs() []int { + Mu.Lock() + defer Mu.Unlock() + values := make([]int, 0, len(vpcIDs)) + for _, v := range vpcIDs { + values = append(values, v) + } + return values +} + +// GetVPCID returns the VPC ID for the given VPC label +func GetVPCID(ctx context.Context, client client.Client, vpcName string) (int, error) { + Mu.Lock() + defer Mu.Unlock() + + // check if the map already contains the VPC ID for the given label + if vpcid, ok := vpcIDs[vpcName]; ok { + return vpcid, nil + } + vpcs, err := client.ListVPCs(ctx, &linodego.ListOptions{}) + if err != nil { + return 0, err + } + for _, vpc := range vpcs { + if vpc.Label == vpcName { + vpcIDs[vpcName] = vpc.ID + return vpc.ID, nil + } + } + return 0, vpcLookupError{vpcName} +} + +// GetVPCIPAddresses returns the VPC IPs for the given VPC label +func GetVPCIPAddresses(ctx context.Context, client client.Client, vpcName string) ([]linodego.VPCIP, error) { + vpcID, err := GetVPCID(ctx, client, strings.TrimSpace(vpcName)) + if err != nil { + return nil, err + } + resp, err := client.ListVPCIPAddresses(ctx, vpcID, linodego.NewListOptions(0, "")) + if err != nil { + if linodego.ErrHasStatus(err, http.StatusNotFound) { + Mu.Lock() + defer Mu.Unlock() + klog.Errorf("vpc %s not found.
Deleting entry from cache", vpcName) + delete(vpcIDs, vpcName) + } + return nil, err + } + return resp, nil +} diff --git a/cloud/linode/vpc_test.go b/cloud/linode/vpc_test.go new file mode 100644 index 00000000..9e99b675 --- /dev/null +++ b/cloud/linode/vpc_test.go @@ -0,0 +1,149 @@ +package linode + +import ( + "context" + "errors" + "net/http" + "reflect" + "sort" + "testing" + + "github.com/golang/mock/gomock" + "github.com/linode/linode-cloud-controller-manager/cloud/linode/client/mocks" + "github.com/linode/linodego" + "github.com/stretchr/testify/assert" +) + +func TestGetAllVPCIDs(t *testing.T) { + tests := []struct { + name string + vpcIDs map[string]int + want []int + }{ + { + name: "multiple vpcs present", + vpcIDs: map[string]int{"test1": 1, "test2": 2, "test3": 3}, + want: []int{1, 2, 3}, + }, + { + name: "no vpc present", + vpcIDs: map[string]int{}, + want: []int{}, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + vpcIDs = tt.vpcIDs + got := GetAllVPCIDs() + sort.Ints(got) + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("GetAllVPCIDs() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestGetVPCID(t *testing.T) { + t.Run("vpcID in cache", func(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + client := mocks.NewMockClient(ctrl) + vpcIDs = map[string]int{"test1": 1, "test2": 2, "test3": 3} + got, err := GetVPCID(context.TODO(), client, "test3") + if err != nil { + t.Errorf("GetVPCID() error = %v", err) + return + } + if got != vpcIDs["test3"] { + t.Errorf("GetVPCID() = %v, want %v", got, vpcIDs["test3"]) + } + }) + + t.Run("vpcID not in cache and listVPCs return error", func(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + client := mocks.NewMockClient(ctrl) + vpcIDs = map[string]int{"test1": 1, "test2": 2, "test3": 3} + client.EXPECT().ListVPCs(gomock.Any(), gomock.Any()).Times(1).Return([]linodego.VPC{}, errors.New("error")) + got, err := GetVPCID(context.TODO(), client, "test4") + assert.Error(t, err) + if got != 0 { + t.Errorf("GetVPCID() = %v, want %v", got, 0) + } + }) + + t.Run("vpcID not in cache and listVPCs return nothing", func(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + client := mocks.NewMockClient(ctrl) + vpcIDs = map[string]int{"test1": 1, "test2": 2, "test3": 3} + client.EXPECT().ListVPCs(gomock.Any(), gomock.Any()).Times(1).Return([]linodego.VPC{}, nil) + got, err := GetVPCID(context.TODO(), client, "test4") + assert.ErrorIs(t, err, vpcLookupError{"test4"}) + if got != 0 { + t.Errorf("GetVPCID() = %v, want %v", got, 0) + } + }) + + t.Run("vpcID not in cache and listVPCs return vpc info", func(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + client := mocks.NewMockClient(ctrl) + vpcIDs = map[string]int{"test1": 1, "test2": 2, "test3": 3} + client.EXPECT().ListVPCs(gomock.Any(), gomock.Any()).Times(1).Return([]linodego.VPC{{ID: 4, Label: "test4"}}, nil) + got, err := GetVPCID(context.TODO(), client, "test4") + assert.NoError(t, err) + if got != 4 { + t.Errorf("GetVPCID() = %v, want %v", got, 4) + } + }) +} + +func TestGetVPCIPAddresses(t *testing.T) { + t.Run("vpc id not found", func(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + client := mocks.NewMockClient(ctrl) + vpcIDs = map[string]int{"test1": 1, "test2": 2, "test3": 3} + client.EXPECT().ListVPCs(gomock.Any(), gomock.Any()).Times(1).Return([]linodego.VPC{}, nil) + _, err := GetVPCIPAddresses(context.TODO(), client, "test4") + assert.Error(t, 
err) + }) + + t.Run("vpc id found but listing ip addresses fails with 404 error", func(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + client := mocks.NewMockClient(ctrl) + vpcIDs = map[string]int{"test1": 1, "test2": 2, "test3": 3} + client.EXPECT().ListVPCIPAddresses(gomock.Any(), gomock.Any(), gomock.Any()).Times(1).Return([]linodego.VPCIP{}, &linodego.Error{Code: http.StatusNotFound, Message: "[404] [label] VPC not found"}) + _, err := GetVPCIPAddresses(context.TODO(), client, "test3") + assert.Error(t, err) + _, exists := vpcIDs["test3"] + assert.False(t, exists, "test3 key should get deleted from vpcIDs map") + }) + + t.Run("vpc id found but listing ip addresses fails with 500 error", func(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + client := mocks.NewMockClient(ctrl) + vpcIDs = map[string]int{"test1": 1, "test2": 2, "test3": 3} + client.EXPECT().ListVPCIPAddresses(gomock.Any(), gomock.Any(), gomock.Any()).Times(1).Return([]linodego.VPCIP{}, &linodego.Error{Code: http.StatusInternalServerError, Message: "[500] [label] Internal Server Error"}) + _, err := GetVPCIPAddresses(context.TODO(), client, "test1") + assert.Error(t, err) + _, exists := vpcIDs["test1"] + assert.True(t, exists, "test1 key should not get deleted from vpcIDs map") + }) + + t.Run("vpc id found and listing vpc ipaddresses succeeds", func(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + client := mocks.NewMockClient(ctrl) + vpcIDs = map[string]int{"test1": 1, "test2": 2, "test3": 3} + client.EXPECT().ListVPCs(gomock.Any(), gomock.Any()).Times(1).Return([]linodego.VPC{{ID: 10, Label: "test10"}}, nil) + client.EXPECT().ListVPCIPAddresses(gomock.Any(), gomock.Any(), gomock.Any()).Times(1).Return([]linodego.VPCIP{}, nil) + _, err := GetVPCIPAddresses(context.TODO(), client, "test10") + assert.NoError(t, err) + _, exists := vpcIDs["test10"] + assert.True(t, exists, "test10 key should be present in vpcIDs map") + }) +} diff --git a/codecov.yml b/codecov.yml new file mode 100644 index 00000000..19eef857 --- /dev/null +++ b/codecov.yml @@ -0,0 +1,3 @@ +ignore: + - "cloud/linode/client/mocks" + - "cloud/linode/client/client_with_metrics.go" diff --git a/deploy/ccm-linode-template.yaml b/deploy/ccm-linode-template.yaml index 6d4c53ce..4f0048b1 100644 --- a/deploy/ccm-linode-template.yaml +++ b/deploy/ccm-linode-template.yaml @@ -23,6 +23,9 @@ rules: - apiGroups: [""] resources: ["endpoints"] verbs: ["get", "watch", "list", "update", "create"] +- apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "watch", "list", "update", "create"] - apiGroups: [""] resources: ["nodes"] verbs: ["get", "watch", "list", "update", "delete", "patch"] @@ -101,10 +104,10 @@ spec: imagePullPolicy: Always name: ccm-linode args: - - --leader-elect-resource-lock=endpoints + - --leader-elect-resource-lock=leases - --v=3 - - --port=0 - --secure-port=10253 + - --webhook-secure-port=0 volumeMounts: - mountPath: /etc/kubernetes name: k8s diff --git a/deploy/chart/templates/clusterrole-rbac.yaml b/deploy/chart/templates/clusterrole-rbac.yaml index 29532135..42dbbeb8 100644 --- a/deploy/chart/templates/clusterrole-rbac.yaml +++ b/deploy/chart/templates/clusterrole-rbac.yaml @@ -6,6 +6,9 @@ rules: - apiGroups: [""] resources: ["endpoints"] verbs: ["get", "watch", "list", "update", "create"] + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "watch", "list", "update", "create"] - apiGroups: [""] resources: ["nodes"] verbs: ["get", 
"watch", "list", "update", "delete", "patch"] @@ -27,3 +30,11 @@ rules: - apiGroups: [""] resources: ["services/status"] verbs: ["get", "watch", "list", "update", "patch"] +{{- if .Values.sharedIPLoadBalancing }} + - apiGroups: ["cilium.io"] + resources: ["ciliumloadbalancerippools"] + verbs: ["get", "watch", "list", "update", "create", "delete"] + - apiGroups: ["cilium.io"] + resources: ["ciliumbgppeeringpolicies"] + verbs: ["get", "watch", "list", "create"] +{{- end }} diff --git a/deploy/chart/templates/daemonset.yaml b/deploy/chart/templates/daemonset.yaml index 86d45dd5..f2ca2985 100644 --- a/deploy/chart/templates/daemonset.yaml +++ b/deploy/chart/templates/daemonset.yaml @@ -29,13 +29,57 @@ spec: imagePullPolicy: {{ .Values.image.pullPolicy }} name: ccm-linode args: - - --leader-elect-resource-lock=endpoints + - --leader-elect-resource-lock=leases - --v=3 - - --port=0 - --secure-port=10253 + - --webhook-secure-port=0 + {{- if .Values.linodegoDebug }} + - --linodego-debug={{ .Values.linodegoDebug }} + {{- end }} + {{- if .Values.routeController }} + - --enable-route-controller=true + {{- if and .Values.routeController.vpcName .Values.routeController.vpcNames }} + {{- fail "Both vpcName and vpcNames are set. Please use only vpcNames." }} + {{- end }} + {{- if not (or .Values.routeController.vpcName .Values.routeController.vpcNames) }} + {{- fail "Neither vpcName nor vpcNames is set. Please set one of them." }} + {{- end }} + {{- if .Values.routeController.vpcName }} + - --vpc-name={{ .Values.routeController.vpcName }} + {{- end }} + {{- if .Values.routeController.vpcNames }} + - --vpc-names={{ .Values.routeController.vpcNames }} + {{- end }} + - --configure-cloud-routes={{ default true .Values.routeController.configureCloudRoutes }} + - --cluster-cidr={{ required "A valid .Values.routeController.clusterCIDR is required" .Values.routeController.clusterCIDR }} + {{- if .Values.routeController.routeReconciliationPeriod }} + - --route-reconciliation-period={{ .Values.routeController.routeReconciliationPeriod }} + {{- end }} + {{- end }} + {{- if .Values.sharedIPLoadBalancing }} + {{- if .Values.sharedIPLoadBalancing.bgpNodeSelector }} + - --bgp-node-selector={{ .Values.sharedIPLoadBalancing.bgpNodeSelector }} + {{- end }} + {{- if .Values.sharedIPLoadBalancing.ipHolderSuffix }} + - --ip-holder-suffix={{ .Values.sharedIPLoadBalancing.ipHolderSuffix }} + {{- end}} + - --load-balancer-type={{ required "A valid .Values.sharedIPLoadBalancing.loadBalancerType is required for shared IP load-balancing" .Values.sharedIPLoadBalancing.loadBalancerType }} + {{- end }} + {{- if .Values.tokenHealthChecker }} + - --enable-token-health-checker={{ .Values.tokenHealthChecker }} + {{- end }} + {{- if .Values.nodeBalancerTags }} + - --nodebalancer-tags={{ join " " .Values.nodeBalancerTags }} + {{- end }} + {{- if .Values.allowUnauthorizedMetrics }} + - --authorization-always-allow-paths="/metrics" + {{- end }} volumeMounts: - mountPath: /etc/kubernetes name: k8s + {{- with .Values.volumeMounts}} + {{- toYaml . | nindent 12 }} + {{- end}} env: - name: LINODE_API_TOKEN valueFrom: @@ -47,10 +91,13 @@ spec: secretKeyRef: name: {{ if .Values.secretRef }}{{ .Values.secretRef.name | default "ccm-linode" }}{{ else }}"ccm-linode"{{ end }} key: {{ if .Values.secretRef }}{{ .Values.secretRef.regionRef | default "region" }}{{ else }}"region"{{ end }} - {{if .Values.env}} - {{- toYaml .Values.env | nindent 12 }} - {{end}} + {{- with .Values.env}} + {{- toYaml . 
| nindent 12 }} + {{- end}} volumes: - name: k8s hostPath: - path: /etc/kubernetes \ No newline at end of file + path: /etc/kubernetes + {{- with .Values.volumes}} + {{- toYaml . | nindent 8 }} + {{- end}} diff --git a/deploy/chart/values.yaml b/deploy/chart/values.yaml index 42ee251b..40c51f65 100644 --- a/deploy/chart/values.yaml +++ b/deploy/chart/values.yaml @@ -43,9 +43,43 @@ tolerations: - key: node.kubernetes.io/unreachable operator: Exists effect: NoSchedule + +# Options for LoadBalancers backed by shared IPs instead of NodeBalancers +# sharedIPLoadBalancing: +# loadBalancerType: cilium-bgp +# bgpNodeSelector: +# ipHolderSuffix: + +# This section adds the ability to enable the route controller for the CCM +# routeController: +# vpcName: [Deprecated: use vpcNames instead] +# vpcNames: +# clusterCIDR: 10.0.0.0/8 +# configureCloudRoutes: true + +# Enable Linode token health checker +# tokenHealthChecker: true + # This section adds the ability to pass environment variables to adjust CCM defaults # https://github.com/linode/linode-cloud-controller-manager/blob/master/cloud/linode/loadbalancers.go # LINODE_HOSTNAME_ONLY_INGRESS type bool is supported -# env: - # - name: EXAMPLE_ENV_VAR - # value: "true" +env: +# - name: EXAMPLE_ENV_VAR +# value: "true" + +# Linode tags to apply to all NodeBalancers +nodeBalancerTags: [] + +# This section adds the ability to pass volumes to the CCM DaemonSet +volumes: +# - name: test-volume +# emptyDir: +# sizeLimit: 500Mi + +# This section adds the ability to pass volumeMounts to the CCM container +volumeMounts: +# - mountPath: /tmp/ +# name: test-volume + +# This flag allows scraping the /metrics endpoint without authorization +allowUnauthorizedMetrics: false diff --git a/deploy/generate-manifest.sh b/deploy/generate-manifest.sh index 75994adc..d61ea290 100755 --- a/deploy/generate-manifest.sh +++ b/deploy/generate-manifest.sh @@ -4,6 +4,11 @@ set -o pipefail -o noclobber -o nounset die() { echo "$*" 1>&2; exit 1; } +echo -e "\n********************************************************************" +echo -e "WARNING: This script is deprecated and may be removed in the future." +echo -e "Please use Helm for installs, or refer to the docs for alternatives." +echo -e "********************************************************************\n" + [ "$#" -eq 2 ] || die "First argument must be a Linode APIv4 Personal Access Token with all permissions.
(https://cloud.linode.com/profile/tokens) diff --git a/devbox.json b/devbox.json new file mode 100644 index 00000000..ff3d96fa --- /dev/null +++ b/devbox.json @@ -0,0 +1,31 @@ +{ + "packages": [ + "ctlptl@latest", + "clusterctl@latest", + "docker@latest", + "envsubst@latest", + "go@1.23.3", + "golangci-lint@latest", + "jq@latest", + "kind@latest", + "kubectl@latest", + "kustomize@latest", + "kyverno-chainsaw@latest", + "mockgen@latest", + "yq-go@latest" + ], + "shell": { + "init_hook": [ + "export \"GOROOT=$(go env GOROOT)\"" + ], + "scripts": { + "mgmt-and-capl-cluster": "make mgmt-and-capl-cluster", + "e2e-test": "make e2e-test", + "e2e-test-bgp": "make e2e-test-bgp", + "cleanup-cluster": "make cleanup-cluster" + } + }, + "env": { + "EXP_CLUSTER_RESOURCE_SET": "true" + } +} diff --git a/devbox.lock b/devbox.lock new file mode 100644 index 00000000..cb767c68 --- /dev/null +++ b/devbox.lock @@ -0,0 +1,733 @@ +{ + "lockfile_version": "1", + "packages": { + "clusterctl@latest": { + "last_modified": "2024-11-03T14:18:04Z", + "resolved": "github:NixOS/nixpkgs/4ae2e647537bcdbb82265469442713d066675275#clusterctl", + "source": "devbox-search", + "version": "1.8.4", + "systems": { + "aarch64-darwin": { + "outputs": [ + { + "name": "out", + "path": "/nix/store/5s555wvi2h0w7r99raairnfzrvmpmh6q-clusterctl-1.8.4", + "default": true + } + ], + "store_path": "/nix/store/5s555wvi2h0w7r99raairnfzrvmpmh6q-clusterctl-1.8.4" + }, + "aarch64-linux": { + "outputs": [ + { + "name": "out", + "path": "/nix/store/1bqxwb82x5lg07a4kwc22ws342d0dwxz-clusterctl-1.8.4", + "default": true + } + ], + "store_path": "/nix/store/1bqxwb82x5lg07a4kwc22ws342d0dwxz-clusterctl-1.8.4" + }, + "x86_64-darwin": { + "outputs": [ + { + "name": "out", + "path": "/nix/store/wj491hzqdi6bzrzm8mcznwbd1a4542km-clusterctl-1.8.4", + "default": true + } + ], + "store_path": "/nix/store/wj491hzqdi6bzrzm8mcznwbd1a4542km-clusterctl-1.8.4" + }, + "x86_64-linux": { + "outputs": [ + { + "name": "out", + "path": "/nix/store/2vyd0iw7d2x9rlan9jjpyvpzbr3c63ic-clusterctl-1.8.4", + "default": true + } + ], + "store_path": "/nix/store/2vyd0iw7d2x9rlan9jjpyvpzbr3c63ic-clusterctl-1.8.4" + } + } + }, + "ctlptl@latest": { + "last_modified": "2024-11-03T14:18:04Z", + "resolved": "github:NixOS/nixpkgs/4ae2e647537bcdbb82265469442713d066675275#ctlptl", + "source": "devbox-search", + "version": "0.8.35", + "systems": { + "aarch64-darwin": { + "outputs": [ + { + "name": "out", + "path": "/nix/store/1nc7z4qscr5fh7lppiiz7xb1hqrhjqpb-ctlptl-0.8.35", + "default": true + } + ], + "store_path": "/nix/store/1nc7z4qscr5fh7lppiiz7xb1hqrhjqpb-ctlptl-0.8.35" + }, + "aarch64-linux": { + "outputs": [ + { + "name": "out", + "path": "/nix/store/9sm3jwlkdxbgkv3vz6pip5r45v8cjnhf-ctlptl-0.8.35", + "default": true + } + ], + "store_path": "/nix/store/9sm3jwlkdxbgkv3vz6pip5r45v8cjnhf-ctlptl-0.8.35" + }, + "x86_64-darwin": { + "outputs": [ + { + "name": "out", + "path": "/nix/store/zn1s9i1s25dgb0yphp8avrfqqk7wxjbv-ctlptl-0.8.35", + "default": true + } + ], + "store_path": "/nix/store/zn1s9i1s25dgb0yphp8avrfqqk7wxjbv-ctlptl-0.8.35" + }, + "x86_64-linux": { + "outputs": [ + { + "name": "out", + "path": "/nix/store/nd5bc6s6jxczmf1vg2vm314621dwfcak-ctlptl-0.8.35", + "default": true + } + ], + "store_path": "/nix/store/nd5bc6s6jxczmf1vg2vm314621dwfcak-ctlptl-0.8.35" + } + } + }, + "docker@latest": { + "last_modified": "2024-11-05T01:08:39Z", + "resolved": "github:NixOS/nixpkgs/a04d33c0c3f1a59a2c1cb0c6e34cd24500e5a1dc#docker", + "source": "devbox-search", + "version": "27.3.1", + "systems": { 
+ "aarch64-darwin": { + "outputs": [ + { + "name": "out", + "path": "/nix/store/n2l6y7zp830kasbw0xirfhqliniln54l-docker-27.3.1", + "default": true + } + ], + "store_path": "/nix/store/n2l6y7zp830kasbw0xirfhqliniln54l-docker-27.3.1" + }, + "aarch64-linux": { + "outputs": [ + { + "name": "out", + "path": "/nix/store/30w5k9rjzsjhscahps94d0bhd7f57pv8-docker-27.3.1", + "default": true + } + ], + "store_path": "/nix/store/30w5k9rjzsjhscahps94d0bhd7f57pv8-docker-27.3.1" + }, + "x86_64-darwin": { + "outputs": [ + { + "name": "out", + "path": "/nix/store/l4cfzw1bvvcqn0s1yyvc2pxmjz17mymv-docker-27.3.1", + "default": true + } + ], + "store_path": "/nix/store/l4cfzw1bvvcqn0s1yyvc2pxmjz17mymv-docker-27.3.1" + }, + "x86_64-linux": { + "outputs": [ + { + "name": "out", + "path": "/nix/store/267rdap7pn4wg03q2akrm5lx9xsls6rk-docker-27.3.1", + "default": true + } + ], + "store_path": "/nix/store/267rdap7pn4wg03q2akrm5lx9xsls6rk-docker-27.3.1" + } + } + }, + "envsubst@latest": { + "last_modified": "2024-11-03T14:18:04Z", + "resolved": "github:NixOS/nixpkgs/4ae2e647537bcdbb82265469442713d066675275#envsubst", + "source": "devbox-search", + "version": "1.4.2", + "systems": { + "aarch64-darwin": { + "outputs": [ + { + "name": "out", + "path": "/nix/store/76jp5bcfhf17znkaffqsfqdr0p1gz8cx-envsubst-1.4.2", + "default": true + } + ], + "store_path": "/nix/store/76jp5bcfhf17znkaffqsfqdr0p1gz8cx-envsubst-1.4.2" + }, + "aarch64-linux": { + "outputs": [ + { + "name": "out", + "path": "/nix/store/c3cilncva8s3x4cmpfv29jsp1ypj35p4-envsubst-1.4.2", + "default": true + } + ], + "store_path": "/nix/store/c3cilncva8s3x4cmpfv29jsp1ypj35p4-envsubst-1.4.2" + }, + "x86_64-darwin": { + "outputs": [ + { + "name": "out", + "path": "/nix/store/ix7y1xqrnm1gs23y0ylspi91m7490qiq-envsubst-1.4.2", + "default": true + } + ], + "store_path": "/nix/store/ix7y1xqrnm1gs23y0ylspi91m7490qiq-envsubst-1.4.2" + }, + "x86_64-linux": { + "outputs": [ + { + "name": "out", + "path": "/nix/store/ylvk3rs98ssd24qkpxc04mji18magc9f-envsubst-1.4.2", + "default": true + } + ], + "store_path": "/nix/store/ylvk3rs98ssd24qkpxc04mji18magc9f-envsubst-1.4.2" + } + } + }, + "go@1.23.3": { + "last_modified": "2024-11-28T07:51:56Z", + "resolved": "github:NixOS/nixpkgs/226216574ada4c3ecefcbbec41f39ce4655f78ef#go", + "source": "devbox-search", + "version": "1.23.3", + "systems": { + "aarch64-darwin": { + "outputs": [ + { + "name": "out", + "path": "/nix/store/qrj2wp6vzfpjfrrlcmr22818zg83fb73-go-1.23.3", + "default": true + } + ], + "store_path": "/nix/store/qrj2wp6vzfpjfrrlcmr22818zg83fb73-go-1.23.3" + }, + "aarch64-linux": { + "outputs": [ + { + "name": "out", + "path": "/nix/store/dm66qyl19skrwcmk4rb9xcs64xc1d071-go-1.23.3", + "default": true + } + ], + "store_path": "/nix/store/dm66qyl19skrwcmk4rb9xcs64xc1d071-go-1.23.3" + }, + "x86_64-darwin": { + "outputs": [ + { + "name": "out", + "path": "/nix/store/vkjn6njpz4gy5ma763vh8hh93bgjwycr-go-1.23.3", + "default": true + } + ], + "store_path": "/nix/store/vkjn6njpz4gy5ma763vh8hh93bgjwycr-go-1.23.3" + }, + "x86_64-linux": { + "outputs": [ + { + "name": "out", + "path": "/nix/store/bavnchxi7v6xs077jxv7fl5rrqc3y87w-go-1.23.3", + "default": true + } + ], + "store_path": "/nix/store/bavnchxi7v6xs077jxv7fl5rrqc3y87w-go-1.23.3" + } + } + }, + "golangci-lint@latest": { + "last_modified": "2024-11-03T14:18:04Z", + "resolved": "github:NixOS/nixpkgs/4ae2e647537bcdbb82265469442713d066675275#golangci-lint", + "source": "devbox-search", + "version": "1.61.0", + "systems": { + "aarch64-darwin": { + "outputs": [ + { + "name": "out", + 
"path": "/nix/store/vm7syji08qh6q1s7ckd777p7kcjflx9b-golangci-lint-1.61.0", + "default": true + } + ], + "store_path": "/nix/store/vm7syji08qh6q1s7ckd777p7kcjflx9b-golangci-lint-1.61.0" + }, + "aarch64-linux": { + "outputs": [ + { + "name": "out", + "path": "/nix/store/6vx22sm4x9lmyqswq7svmih0q68c92lg-golangci-lint-1.61.0", + "default": true + } + ], + "store_path": "/nix/store/6vx22sm4x9lmyqswq7svmih0q68c92lg-golangci-lint-1.61.0" + }, + "x86_64-darwin": { + "outputs": [ + { + "name": "out", + "path": "/nix/store/ipn5pi90mallx4d4c923h3rc7bpmiwz9-golangci-lint-1.61.0", + "default": true + } + ], + "store_path": "/nix/store/ipn5pi90mallx4d4c923h3rc7bpmiwz9-golangci-lint-1.61.0" + }, + "x86_64-linux": { + "outputs": [ + { + "name": "out", + "path": "/nix/store/bz2kxbkb9yxdkz2pdl640g32xyqxqd4c-golangci-lint-1.61.0", + "default": true + } + ], + "store_path": "/nix/store/bz2kxbkb9yxdkz2pdl640g32xyqxqd4c-golangci-lint-1.61.0" + } + } + }, + "jq@latest": { + "last_modified": "2024-11-03T14:18:04Z", + "resolved": "github:NixOS/nixpkgs/4ae2e647537bcdbb82265469442713d066675275#jq", + "source": "devbox-search", + "version": "1.7.1", + "systems": { + "aarch64-darwin": { + "outputs": [ + { + "name": "bin", + "path": "/nix/store/mx6zrpgk9ncxmdkriivad29g6ms54lp4-jq-1.7.1-bin", + "default": true + }, + { + "name": "man", + "path": "/nix/store/13q2k3y8rabhsxknma0by8m9kvvdc5z4-jq-1.7.1-man", + "default": true + }, + { + "name": "dev", + "path": "/nix/store/3zyi01bjcf4f54khnnyhpbhg53g552mh-jq-1.7.1-dev" + }, + { + "name": "doc", + "path": "/nix/store/njqci9px1wh3nd1k0w0rdizkj7dq38sz-jq-1.7.1-doc" + }, + { + "name": "out", + "path": "/nix/store/g25q96a9y4m2y5v8acyyd16l4wml2haz-jq-1.7.1" + } + ], + "store_path": "/nix/store/mx6zrpgk9ncxmdkriivad29g6ms54lp4-jq-1.7.1-bin" + }, + "aarch64-linux": { + "outputs": [ + { + "name": "bin", + "path": "/nix/store/m749l3lg9kmld916656p0b4mb9p9i62y-jq-1.7.1-bin", + "default": true + }, + { + "name": "man", + "path": "/nix/store/061h9pv30awg36fjfhzsw5a1bh37zcdr-jq-1.7.1-man", + "default": true + }, + { + "name": "dev", + "path": "/nix/store/4y9x9d4w7np0vby58glizzaf5p55g4ak-jq-1.7.1-dev" + }, + { + "name": "doc", + "path": "/nix/store/xlpqc0zdgbi2wg0rxippj4jp7wgbqbmk-jq-1.7.1-doc" + }, + { + "name": "out", + "path": "/nix/store/ar5glhxq1x82ngnd6cni4wpfdfd06kdz-jq-1.7.1" + } + ], + "store_path": "/nix/store/m749l3lg9kmld916656p0b4mb9p9i62y-jq-1.7.1-bin" + }, + "x86_64-darwin": { + "outputs": [ + { + "name": "bin", + "path": "/nix/store/91chd95c04083fxabvjbvpnaxizji71d-jq-1.7.1-bin", + "default": true + }, + { + "name": "man", + "path": "/nix/store/2xgr0bm2dcn64jxyh7v8jg8ygdpy6g50-jq-1.7.1-man", + "default": true + }, + { + "name": "out", + "path": "/nix/store/wnxi834pnbfl9lz2ahx62z6ivsl3d3ns-jq-1.7.1" + }, + { + "name": "dev", + "path": "/nix/store/39kp2v0mkw82jkggmwlpyv0pzkav2z2y-jq-1.7.1-dev" + }, + { + "name": "doc", + "path": "/nix/store/d53nra4g8m3lfg4zqrmjmmp2b7cfbilx-jq-1.7.1-doc" + } + ], + "store_path": "/nix/store/91chd95c04083fxabvjbvpnaxizji71d-jq-1.7.1-bin" + }, + "x86_64-linux": { + "outputs": [ + { + "name": "bin", + "path": "/nix/store/wj603ds3b3gdwsrlx4nzcg4v3ba2298b-jq-1.7.1-bin", + "default": true + }, + { + "name": "man", + "path": "/nix/store/yiwlz5r6vlb6g32fczyb6zghnrizv3mq-jq-1.7.1-man", + "default": true + }, + { + "name": "dev", + "path": "/nix/store/rlxn658k96prpc4xhrzld4jwjqvkb2bz-jq-1.7.1-dev" + }, + { + "name": "doc", + "path": "/nix/store/d0cwkm74mp1mqbf3bsdkbyx94byipyzp-jq-1.7.1-doc" + }, + { + "name": "out", + "path": 
"/nix/store/3nsnyac45i07pfgjw5bn1kpbwaxphm70-jq-1.7.1" + } + ], + "store_path": "/nix/store/wj603ds3b3gdwsrlx4nzcg4v3ba2298b-jq-1.7.1-bin" + } + } + }, + "kind@latest": { + "last_modified": "2024-11-03T14:18:04Z", + "resolved": "github:NixOS/nixpkgs/4ae2e647537bcdbb82265469442713d066675275#kind", + "source": "devbox-search", + "version": "0.24.0", + "systems": { + "aarch64-darwin": { + "outputs": [ + { + "name": "out", + "path": "/nix/store/ipy4vca55lan1szk9h9g7n3mrnm1lpza-kind-0.24.0", + "default": true + } + ], + "store_path": "/nix/store/ipy4vca55lan1szk9h9g7n3mrnm1lpza-kind-0.24.0" + }, + "aarch64-linux": { + "outputs": [ + { + "name": "out", + "path": "/nix/store/bx135jsrpwq43d4hnl2c6v6qph5685zk-kind-0.24.0", + "default": true + } + ], + "store_path": "/nix/store/bx135jsrpwq43d4hnl2c6v6qph5685zk-kind-0.24.0" + }, + "x86_64-darwin": { + "outputs": [ + { + "name": "out", + "path": "/nix/store/6g7iymghysai09dlhrddgifb2sf4zb50-kind-0.24.0", + "default": true + } + ], + "store_path": "/nix/store/6g7iymghysai09dlhrddgifb2sf4zb50-kind-0.24.0" + }, + "x86_64-linux": { + "outputs": [ + { + "name": "out", + "path": "/nix/store/kpykmx58v8p2rddp4p1cqy24c5ym7a3z-kind-0.24.0", + "default": true + } + ], + "store_path": "/nix/store/kpykmx58v8p2rddp4p1cqy24c5ym7a3z-kind-0.24.0" + } + } + }, + "kubectl@latest": { + "last_modified": "2024-11-03T14:18:04Z", + "resolved": "github:NixOS/nixpkgs/4ae2e647537bcdbb82265469442713d066675275#kubectl", + "source": "devbox-search", + "version": "1.31.2", + "systems": { + "aarch64-darwin": { + "outputs": [ + { + "name": "out", + "path": "/nix/store/hk0qrz3w3ivibz67fjah61gpq5dfpj7n-kubectl-1.31.2", + "default": true + }, + { + "name": "man", + "path": "/nix/store/k2dwhk2hdhdp7vbliij1jgrfm0rvj57c-kubectl-1.31.2-man", + "default": true + }, + { + "name": "convert", + "path": "/nix/store/90nf3rw5h92bzafsf24s2ijfwfbbglvy-kubectl-1.31.2-convert" + } + ], + "store_path": "/nix/store/hk0qrz3w3ivibz67fjah61gpq5dfpj7n-kubectl-1.31.2" + }, + "aarch64-linux": { + "outputs": [ + { + "name": "out", + "path": "/nix/store/z3mlcpb4gd4n1c7c2ly7fz2j65zkcv3n-kubectl-1.31.2", + "default": true + }, + { + "name": "man", + "path": "/nix/store/6wc7cni53c0g9162z281qqmflfpp3vq7-kubectl-1.31.2-man", + "default": true + }, + { + "name": "convert", + "path": "/nix/store/kbkblm912v1lgrmqvg187kviwxfg3ywr-kubectl-1.31.2-convert" + } + ], + "store_path": "/nix/store/z3mlcpb4gd4n1c7c2ly7fz2j65zkcv3n-kubectl-1.31.2" + }, + "x86_64-darwin": { + "outputs": [ + { + "name": "out", + "path": "/nix/store/bgq5fk947zf52ys0izx4k4m7pwq77pri-kubectl-1.31.2", + "default": true + }, + { + "name": "man", + "path": "/nix/store/xija3wpdm6jmkmlfd0y6d49vgg3098lw-kubectl-1.31.2-man", + "default": true + }, + { + "name": "convert", + "path": "/nix/store/g49s8ahgcsm2m5azd09ql7434mdzif33-kubectl-1.31.2-convert" + } + ], + "store_path": "/nix/store/bgq5fk947zf52ys0izx4k4m7pwq77pri-kubectl-1.31.2" + }, + "x86_64-linux": { + "outputs": [ + { + "name": "out", + "path": "/nix/store/nqkn7vjqgcnp834vg0dwff4rj30v9i65-kubectl-1.31.2", + "default": true + }, + { + "name": "man", + "path": "/nix/store/zfm38523vg5frylms8klxsi8jyqh374i-kubectl-1.31.2-man", + "default": true + }, + { + "name": "convert", + "path": "/nix/store/0yj1raiv1zddfarndmgrgmd7p27cbq6m-kubectl-1.31.2-convert" + } + ], + "store_path": "/nix/store/nqkn7vjqgcnp834vg0dwff4rj30v9i65-kubectl-1.31.2" + } + } + }, + "kustomize@latest": { + "last_modified": "2024-11-03T14:18:04Z", + "resolved": "github:NixOS/nixpkgs/4ae2e647537bcdbb82265469442713d066675275#kustomize", + 
"source": "devbox-search", + "version": "5.5.0", + "systems": { + "aarch64-darwin": { + "outputs": [ + { + "name": "out", + "path": "/nix/store/64vp26c9px3fzsm2ffvx2wvs8cybzbdm-kustomize-5.5.0", + "default": true + } + ], + "store_path": "/nix/store/64vp26c9px3fzsm2ffvx2wvs8cybzbdm-kustomize-5.5.0" + }, + "aarch64-linux": { + "outputs": [ + { + "name": "out", + "path": "/nix/store/vg2hwhhs52vbbm215rb8vak8hbf86rq6-kustomize-5.5.0", + "default": true + } + ], + "store_path": "/nix/store/vg2hwhhs52vbbm215rb8vak8hbf86rq6-kustomize-5.5.0" + }, + "x86_64-darwin": { + "outputs": [ + { + "name": "out", + "path": "/nix/store/98hqbf6yr88bdq98axpr2b2894q380fc-kustomize-5.5.0", + "default": true + } + ], + "store_path": "/nix/store/98hqbf6yr88bdq98axpr2b2894q380fc-kustomize-5.5.0" + }, + "x86_64-linux": { + "outputs": [ + { + "name": "out", + "path": "/nix/store/wqy1ckdjjy07mngl62dxhfkcpriv3j3s-kustomize-5.5.0", + "default": true + } + ], + "store_path": "/nix/store/wqy1ckdjjy07mngl62dxhfkcpriv3j3s-kustomize-5.5.0" + } + } + }, + "kyverno-chainsaw@latest": { + "last_modified": "2024-11-05T18:23:38Z", + "resolved": "github:NixOS/nixpkgs/8c4dc69b9732f6bbe826b5fbb32184987520ff26#kyverno-chainsaw", + "source": "devbox-search", + "version": "0.2.11", + "systems": { + "aarch64-darwin": { + "outputs": [ + { + "name": "out", + "path": "/nix/store/2dfq7856a9i2vxbw3bc3dhq6ad505hxz-kyverno-chainsaw-0.2.11", + "default": true + } + ], + "store_path": "/nix/store/2dfq7856a9i2vxbw3bc3dhq6ad505hxz-kyverno-chainsaw-0.2.11" + }, + "aarch64-linux": { + "outputs": [ + { + "name": "out", + "path": "/nix/store/9d4ddygkw9sdvv38lgk8p68pnkmpw1dk-kyverno-chainsaw-0.2.11", + "default": true + } + ], + "store_path": "/nix/store/9d4ddygkw9sdvv38lgk8p68pnkmpw1dk-kyverno-chainsaw-0.2.11" + }, + "x86_64-darwin": { + "outputs": [ + { + "name": "out", + "path": "/nix/store/mmsixz2pm6gwrnrlhbgrjq78891gdrxq-kyverno-chainsaw-0.2.11", + "default": true + } + ], + "store_path": "/nix/store/mmsixz2pm6gwrnrlhbgrjq78891gdrxq-kyverno-chainsaw-0.2.11" + }, + "x86_64-linux": { + "outputs": [ + { + "name": "out", + "path": "/nix/store/2dw8y9zfs6ri4wnram125pgqgym0q29d-kyverno-chainsaw-0.2.11", + "default": true + } + ], + "store_path": "/nix/store/2dw8y9zfs6ri4wnram125pgqgym0q29d-kyverno-chainsaw-0.2.11" + } + } + }, + "mockgen@latest": { + "last_modified": "2024-11-03T14:18:04Z", + "resolved": "github:NixOS/nixpkgs/4ae2e647537bcdbb82265469442713d066675275#mockgen", + "source": "devbox-search", + "version": "0.5.0", + "systems": { + "aarch64-darwin": { + "outputs": [ + { + "name": "out", + "path": "/nix/store/xw7pgrlkpwrvxzin1k0fs8da63nacnpa-mockgen-0.5.0", + "default": true + } + ], + "store_path": "/nix/store/xw7pgrlkpwrvxzin1k0fs8da63nacnpa-mockgen-0.5.0" + }, + "aarch64-linux": { + "outputs": [ + { + "name": "out", + "path": "/nix/store/lrmd1ahi1sfcc518wjp6sbqmmbfic9vy-mockgen-0.5.0", + "default": true + } + ], + "store_path": "/nix/store/lrmd1ahi1sfcc518wjp6sbqmmbfic9vy-mockgen-0.5.0" + }, + "x86_64-darwin": { + "outputs": [ + { + "name": "out", + "path": "/nix/store/i5471z9sakpqvx4s9m5kjk4xhpn22hwn-mockgen-0.5.0", + "default": true + } + ], + "store_path": "/nix/store/i5471z9sakpqvx4s9m5kjk4xhpn22hwn-mockgen-0.5.0" + }, + "x86_64-linux": { + "outputs": [ + { + "name": "out", + "path": "/nix/store/0gamxw6355qv6b9hpzwkfsnfsvhc60f9-mockgen-0.5.0", + "default": true + } + ], + "store_path": "/nix/store/0gamxw6355qv6b9hpzwkfsnfsvhc60f9-mockgen-0.5.0" + } + } + }, + "yq-go@latest": { + "last_modified": "2024-11-03T14:18:04Z", + "resolved": 
"github:NixOS/nixpkgs/4ae2e647537bcdbb82265469442713d066675275#yq-go", + "source": "devbox-search", + "version": "4.44.3", + "systems": { + "aarch64-darwin": { + "outputs": [ + { + "name": "out", + "path": "/nix/store/nypcsysgi0n88m3laa0yh94304d2k4gl-yq-go-4.44.3", + "default": true + } + ], + "store_path": "/nix/store/nypcsysgi0n88m3laa0yh94304d2k4gl-yq-go-4.44.3" + }, + "aarch64-linux": { + "outputs": [ + { + "name": "out", + "path": "/nix/store/av2h5x1i6dcg55s9a7cq11maxjsqbmq5-yq-go-4.44.3", + "default": true + } + ], + "store_path": "/nix/store/av2h5x1i6dcg55s9a7cq11maxjsqbmq5-yq-go-4.44.3" + }, + "x86_64-darwin": { + "outputs": [ + { + "name": "out", + "path": "/nix/store/3gw1mw88j7w5xj2lkvfkqzya5jxhhn82-yq-go-4.44.3", + "default": true + } + ], + "store_path": "/nix/store/3gw1mw88j7w5xj2lkvfkqzya5jxhhn82-yq-go-4.44.3" + }, + "x86_64-linux": { + "outputs": [ + { + "name": "out", + "path": "/nix/store/g4989ys9ngzld6mfcn0frfb82w3jlhb3-yq-go-4.44.3", + "default": true + } + ], + "store_path": "/nix/store/g4989ys9ngzld6mfcn0frfb82w3jlhb3-yq-go-4.44.3" + } + } + } + } +} diff --git a/docs/configuration/README.md b/docs/configuration/README.md new file mode 100644 index 00000000..9d8f2531 --- /dev/null +++ b/docs/configuration/README.md @@ -0,0 +1,57 @@ +# Configuration Guide + +The Linode Cloud Controller Manager (CCM) offers extensive configuration options to customize its behavior. This section covers all available configuration methods and options. + +## Configuration Areas + +1. **[LoadBalancer Services](loadbalancer.md)** + - NodeBalancer implementation + - BGP-based IP sharing + - Protocol configuration + - Health checks + - SSL/TLS setup + - Connection throttling + - [See examples](../examples/basic.md#loadbalancer-services) + +2. **[Service Annotations](annotations.md)** + - NodeBalancer configuration + - Protocol settings + - Health check options + - Port configuration + - Firewall settings + - [See annotation reference](annotations.md#available-annotations) + +3. **[Node Configuration](nodes.md)** + - Node labels and topology + - Private networking setup + - VPC configuration + - Node controller behavior + - [See node management](nodes.md#node-controller-behavior) + +4. **[Environment Variables](environment.md)** + - Cache settings + - API configuration + - Network settings + - BGP configuration + - [See environment reference](environment.md#available-variables) + +5. **[Firewall Setup](firewall.md)** + - CCM-managed firewalls + - User-managed firewalls + - Allow/deny lists + - [See firewall options](firewall.md#ccm-managed-firewalls) + +6. **[Route Configuration](routes.md)** + - VPC routing + - Pod CIDR management + - Route controller setup + - [See route management](routes.md#route-management) + +7. **[Session Affinity](session-affinity.md)** + - Client IP affinity + - Timeout configuration + - Service configuration + - [See affinity setup](session-affinity.md#configuration) + +For installation instructions, see the [Installation Guide](../getting-started/installation.md). +For troubleshooting help, see the [Troubleshooting Guide](../getting-started/troubleshooting.md). diff --git a/docs/configuration/annotations.md b/docs/configuration/annotations.md new file mode 100644 index 00000000..185627a3 --- /dev/null +++ b/docs/configuration/annotations.md @@ -0,0 +1,117 @@ +# Service Annotations + +## Overview + +Service annotations allow you to customize the behavior of your LoadBalancer services. 
All Service annotations must be prefixed with: `service.beta.kubernetes.io/linode-loadbalancer-` + +For implementation details, see: +- [LoadBalancer Configuration](loadbalancer.md) +- [Basic Service Examples](../examples/basic.md) +- [Advanced Configuration Examples](../examples/advanced.md) + +## Available Annotations + +### Basic Configuration + +| Annotation (Suffix) | Values | Default | Description | +|--------------------|--------|---------|-------------| +| `throttle` | `0`-`20` (`0` to disable) | `0` | Client Connection Throttle, which limits the number of subsequent new connections per second from the same client IP | +| `default-protocol` | `tcp`, `http`, `https` | `tcp` | Specifies the default protocol for the Linode NodeBalancer | +| `default-proxy-protocol` | `none`, `v1`, `v2` | `none` | Specifies whether to use a version of Proxy Protocol on the underlying NodeBalancer | +| `port-*` | json object | | Specifies port-specific NodeBalancer configuration. See [Port Configuration](#port-specific-configuration) | +| `check-type` | `none`, `connection`, `http`, `http_body` | | The type of health check to perform against back-ends. See [Health Checks](loadbalancer.md#health-checks) | +| `check-path` | string | | The URL path to check on each back-end during health checks | +| `check-body` | string | | Text which must be present in the response body to pass the health check | +| `check-interval` | int | | Duration, in seconds, to wait between health checks | +| `check-timeout` | int (1-30) | | Duration, in seconds, to wait for a health check to succeed | +| `check-attempts` | int (1-30) | | Number of health check failures necessary to remove a back-end | +| `check-passive` | bool | `false` | When `true`, `5xx` status codes will cause the health check to fail | +| `preserve` | bool | `false` | When `true`, deleting a `LoadBalancer` service does not delete the underlying NodeBalancer | +| `nodebalancer-id` | string | | The ID of the NodeBalancer to front the service | +| `hostname-only-ingress` | bool | `false` | When `true`, the LoadBalancerStatus will only contain the Hostname | +| `tags` | string | | A comma-separated list of tags to be applied to the NodeBalancer instance | +| `firewall-id` | string | | An existing Cloud Firewall ID to be attached to the NodeBalancer instance. See [Firewall Setup](firewall.md) | +| `firewall-acl` | string | | The Firewall rules to be applied to the NodeBalancer. See [Firewall Configuration](#firewall-configuration) | + +### Port Specific Configuration + +The `port-*` annotation allows per-port configuration, encoded in JSON. For detailed examples, see [LoadBalancer SSL/TLS Setup](loadbalancer.md#ssltls-configuration). + +```yaml +metadata: + annotations: + service.beta.kubernetes.io/linode-loadbalancer-port-443: | + { + "protocol": "https", + "tls-secret-name": "my-tls-secret", + "proxy-protocol": "v2" + } +``` + +Available port options: +- `protocol`: Protocol for this port (tcp, http, https) +- `tls-secret-name`: Name of TLS secret for HTTPS. The secret type should be `kubernetes.io/tls` +- `proxy-protocol`: Proxy protocol version for this port
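+
+For example, a hypothetical Service exposing plain HTTP on port 80 and TLS on port 443 could combine two `port-*` annotations (the secret name is a placeholder):
+
+```yaml
+metadata:
+  annotations:
+    service.beta.kubernetes.io/linode-loadbalancer-port-80: |
+      { "protocol": "http" }
+    service.beta.kubernetes.io/linode-loadbalancer-port-443: |
+      { "protocol": "https", "tls-secret-name": "my-tls-secret" }
+```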
+ +### Deprecated Annotations + +| Annotation (Suffix) | Values | Default | Description | Scheduled Removal | +|--------------------|--------|---------|-------------|-------------------| +| `proxy-protocol` | `none`, `v1`, `v2` | `none` | Specifies whether to use a version of Proxy Protocol on the underlying NodeBalancer | Q4 2021 | + +### Annotation Boolean Values +For annotations with bool value types, the following string values are interpreted as `true`: +- `"1"` +- `"t"` +- `"T"` +- `"true"` +- `"True"` +- `"TRUE"` + +Any other values will be interpreted as `false`. For more details, see [strconv.ParseBool](https://golang.org/pkg/strconv/#ParseBool). + +## Examples + +### Basic HTTP Service +```yaml +metadata: + annotations: + service.beta.kubernetes.io/linode-loadbalancer-default-protocol: "http" + service.beta.kubernetes.io/linode-loadbalancer-check-type: "http" + service.beta.kubernetes.io/linode-loadbalancer-check-path: "/healthz" +``` + +### HTTPS Service with TLS +```yaml +metadata: + annotations: + service.beta.kubernetes.io/linode-loadbalancer-port-443: | + { + "protocol": "https", + "tls-secret-name": "my-tls-secret" + } +``` + +### Firewall Configuration +```yaml +metadata: + annotations: + service.beta.kubernetes.io/linode-loadbalancer-firewall-acl: | + { + "allowList": { + "ipv4": ["192.168.0.0/16"], + "ipv6": ["2001:db8::/32"] + } + } +``` + +For more examples and detailed configuration options, see: +- [LoadBalancer Configuration](loadbalancer.md) +- [Firewall Configuration](firewall.md) +- [Basic Service Examples](../examples/basic.md) +- [Advanced Configuration Examples](../examples/advanced.md) +- [Complete Stack Example](../examples/complete-stack.md) + +See also: +- [Environment Variables](environment.md) +- [Route Configuration](routes.md) +- [Session Affinity](session-affinity.md) diff --git a/docs/configuration/environment.md b/docs/configuration/environment.md new file mode 100644 index 00000000..15ad47f0 --- /dev/null +++ b/docs/configuration/environment.md @@ -0,0 +1,87 @@ +# Environment Variables + +## Overview + +Environment variables provide global configuration options for the CCM. These settings affect caching, API behavior, and networking configurations. + +## Available Variables + +### Cache Configuration + +| Variable | Default | Description | +|----------|---------|-------------| +| `LINODE_INSTANCE_CACHE_TTL` | `15` | Default timeout of instance cache in seconds | +| `LINODE_ROUTES_CACHE_TTL_SECONDS` | `60` | Default timeout of route cache in seconds | + +### API Configuration + +| Variable | Default | Description | +|----------|---------|-------------| +| `LINODE_REQUEST_TIMEOUT_SECONDS` | `120` | Default timeout in seconds for HTTP requests to the Linode API | +| `LINODE_URL` | `https://api.linode.com/v4` | Linode API endpoint | + +### Network Configuration + +| Variable | Default | Description | +|----------|---------|-------------| +| `LINODE_EXTERNAL_SUBNET` | "" | Mark private network as external.
Example: `172.24.0.0/16` | +| `BGP_CUSTOM_ID_MAP` | "" | Use your own map instead of the default region map for BGP | +| `BGP_PEER_PREFIX` | `2600:3c0f` | Use your own BGP peer prefix instead of the default one | + +## Configuration Methods + +### Helm Chart +Configure via `values.yaml`: +```yaml +env: + - name: LINODE_INSTANCE_CACHE_TTL + value: "30" +``` + +### Manual Deployment +Add to the CCM DaemonSet: +```yaml +spec: + template: + spec: + containers: + - name: ccm-linode + env: + - name: LINODE_INSTANCE_CACHE_TTL + value: "30" +``` + +## Usage Guidelines + +### Cache Settings +- Adjust cache TTL based on cluster size and update frequency +- Monitor memory usage when modifying cache settings +- Consider API rate limits when decreasing TTL (see [Linode API Rate Limits](https://techdocs.akamai.com/linode-api/reference/rate-limits)) + +### API Settings +- Increase timeout for slower network conditions +- Use the default API URL unless testing/development requires otherwise +- Consider regional latency when adjusting timeouts + +### Network Settings +- Configure external subnet for custom networking needs +- Use BGP settings only when implementing IP sharing +- Document any custom network configurations + +## Troubleshooting + +### Common Issues + +1. **API Timeouts** + - Check network connectivity + - Verify API endpoint accessibility + - Consider increasing timeout value + +2. **Cache Issues** + - Monitor memory usage + - Verify cache TTL settings + - Check for stale data + +For more details, see: +- [Installation Guide](../getting-started/installation.md) +- [Troubleshooting Guide](../getting-started/troubleshooting.md) diff --git a/docs/configuration/firewall.md b/docs/configuration/firewall.md new file mode 100644 index 00000000..c5d011d4 --- /dev/null +++ b/docs/configuration/firewall.md @@ -0,0 +1,83 @@ +# Firewall Setup + +## Overview + +The CCM provides two methods for securing NodeBalancers with firewalls: +1. CCM-managed Cloud Firewalls (using `firewall-acl` annotation) +2. User-managed Cloud Firewalls (using `firewall-id` annotation) + +## CCM-Managed Firewalls + +### Configuration + +Use the `firewall-acl` annotation to specify firewall rules. The rules should be provided as a JSON object with either an `allowList` or `denyList` (but not both). + +#### Allow List Configuration +```yaml +apiVersion: v1 +kind: Service +metadata: + name: restricted-service + annotations: + service.beta.kubernetes.io/linode-loadbalancer-firewall-acl: | + { + "allowList": { + "ipv4": ["192.168.0.0/16", "10.0.0.0/8"], + "ipv6": ["2001:db8::/32"] + } + } +``` + +#### Deny List Configuration +```yaml +metadata: + annotations: + service.beta.kubernetes.io/linode-loadbalancer-firewall-acl: | + { + "denyList": { + "ipv4": ["203.0.113.0/24"], + "ipv6": ["2001:db8:1234::/48"] + } + } +``` + +### Behavior +- Only one type of list (allow or deny) can be used per service +- Rules are automatically created and managed by the CCM +- Rules are updated when the annotation changes +- Firewall is deleted when the service is deleted (unless preserved) + +## User-Managed Firewalls + +### Configuration + +1. Create a Cloud Firewall in Linode Cloud Manager +2. Attach it to the service using the `firewall-id` annotation: + +```yaml +metadata: + annotations: + service.beta.kubernetes.io/linode-loadbalancer-firewall-id: "12345" +``` + +### Management +- User maintains full control over firewall rules +- Firewall persists after service deletion +- Manual updates required for rule changes
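+
+As a fuller sketch, a complete Service manifest attaching a pre-created Cloud Firewall might look like this (the service name, selector, and firewall ID are placeholders):
+
+```yaml
+apiVersion: v1
+kind: Service
+metadata:
+  name: my-service
+  annotations:
+    # ID of a Cloud Firewall created out-of-band in Cloud Manager or via the API
+    service.beta.kubernetes.io/linode-loadbalancer-firewall-id: "12345"
+spec:
+  type: LoadBalancer
+  selector:
+    app: my-app
+  ports:
+    - port: 443
+      targetPort: 8443
+```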
+ +## Best Practices + +1. **Rule Management** + - Use descriptive rule labels + - Document rule changes + - Perform regular security audits + +2. **IP Range Planning** + - Plan CIDR ranges carefully + - Document allowed/denied ranges + - Consider future expansion + +For more information: +- [Service Annotations](annotations.md#firewall-configuration) +- [LoadBalancer Configuration](loadbalancer.md) +- [Linode Cloud Firewall Documentation](https://www.linode.com/docs/products/networking/cloud-firewall/) diff --git a/docs/configuration/loadbalancer.md b/docs/configuration/loadbalancer.md new file mode 100644 index 00000000..c0781fe2 --- /dev/null +++ b/docs/configuration/loadbalancer.md @@ -0,0 +1,206 @@ +# LoadBalancer Services Configuration + +## Overview + +The CCM supports two types of LoadBalancer implementations: +1. Linode NodeBalancers (default) +2. BGP-based IP sharing + +For implementation examples, see [Basic Service Examples](../examples/basic.md#loadbalancer-services). + +## NodeBalancer Implementation + +When using NodeBalancers, the CCM automatically: +1. Creates and configures a NodeBalancer +2. Sets up backend nodes +3. Manages health checks +4. Handles SSL/TLS configuration + +For more details, see [Linode NodeBalancer Documentation](https://www.linode.com/docs/products/networking/nodebalancers/). + +### Basic Configuration + +Create a LoadBalancer service: +```yaml +apiVersion: v1 +kind: Service +metadata: + name: my-service +spec: + type: LoadBalancer + ports: + - port: 80 + targetPort: 8080 + selector: + app: my-app +``` + +See [Advanced Configuration Examples](../examples/advanced.md#loadbalancer-services) for more complex setups. + +### NodeBalancer Settings + +#### Protocol Configuration +Available protocols: +- `tcp` (default) +- `http` +- `https` + +Set the default protocol: +```yaml +metadata: + annotations: + service.beta.kubernetes.io/linode-loadbalancer-default-protocol: "http" +``` + +See [Service Annotations](annotations.md#basic-configuration) for all protocol options. + +### Health Checks + +Configure health checks using annotations: +```yaml +metadata: + annotations: + service.beta.kubernetes.io/linode-loadbalancer-check-type: "http" + service.beta.kubernetes.io/linode-loadbalancer-check-path: "/healthz" + service.beta.kubernetes.io/linode-loadbalancer-check-interval: "5" + service.beta.kubernetes.io/linode-loadbalancer-check-timeout: "3" + service.beta.kubernetes.io/linode-loadbalancer-check-attempts: "2" +``` + +Available check types: +- `none`: No health check +- `connection`: TCP connection check +- `http`: HTTP status check +- `http_body`: HTTP response body check + +For more details, see [Health Check Configuration](annotations.md#health-check-configuration). + +### SSL/TLS Configuration + +1. Create a TLS secret: +```yaml +apiVersion: v1 +kind: Secret +metadata: + name: my-tls-secret +type: kubernetes.io/tls +data: + tls.crt: <base64-encoded certificate> + tls.key: <base64-encoded private key> +``` + +2.
Reference in service annotation: +```yaml +metadata: + annotations: + service.beta.kubernetes.io/linode-loadbalancer-port-443: | + { + "protocol": "https", + "tls-secret-name": "my-tls-secret" + } +``` + +### Connection Throttling + +Limit connections from the same client IP: +```yaml +metadata: + annotations: + service.beta.kubernetes.io/linode-loadbalancer-throttle: "5" +``` + +### Proxy Protocol + +Enable proxy protocol for client IP preservation: +```yaml +metadata: + annotations: + service.beta.kubernetes.io/linode-loadbalancer-default-proxy-protocol: "v2" +``` + +## BGP-based IP Sharing Implementation + +BGP-based IP sharing provides a more cost-effective solution for multiple LoadBalancer services. For detailed setup instructions, see [Cilium BGP Documentation](https://docs.cilium.io/en/stable/network/bgp-control-plane/). + +### Prerequisites +- [Cilium CNI](https://docs.cilium.io/en/stable/network/bgp-control-plane/) with BGP control plane enabled +- Additional IP provisioning enabled on your account (contact [Linode Support](https://www.linode.com/support/)) +- Nodes labeled for BGP peering + +### Configuration + +1. Enable BGP in CCM deployment: +```yaml +args: + - --load-balancer-type=cilium-bgp + - --bgp-node-selector=cilium-bgp-peering=true + - --ip-holder-suffix=mycluster +``` + +2. Label nodes that should participate in BGP peering: +```bash +kubectl label node my-node cilium-bgp-peering=true +``` + +3. Create LoadBalancer services as normal - the CCM will automatically use BGP-based IP sharing instead of creating NodeBalancers. + +### Environment Variables +- `BGP_CUSTOM_ID_MAP`: Use your own map instead of default region map for BGP +- `BGP_PEER_PREFIX`: Use your own BGP peer prefix instead of default one + +For more details, see [Environment Variables](environment.md#network-configuration). 
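+
+With the Helm chart, the same BGP options map onto the `sharedIPLoadBalancing` block in `values.yaml`; a minimal sketch using the field names from this chart's values file (the selector and suffix values are illustrative):
+
+```yaml
+sharedIPLoadBalancing:
+  loadBalancerType: cilium-bgp
+  bgpNodeSelector: cilium-bgp-peering=true
+  ipHolderSuffix: mycluster
+```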
+
+## Advanced Configuration
+
+### Using Existing NodeBalancers
+
+Specify an existing NodeBalancer:
+```yaml
+metadata:
+  annotations:
+    service.beta.kubernetes.io/linode-loadbalancer-nodebalancer-id: "12345"
+```
+
+### NodeBalancer Preservation
+
+Prevent NodeBalancer deletion when the service is deleted:
+```yaml
+metadata:
+  annotations:
+    service.beta.kubernetes.io/linode-loadbalancer-preserve: "true"
+```
+
+### Port Configuration
+
+Configure individual ports:
+```yaml
+metadata:
+  annotations:
+    service.beta.kubernetes.io/linode-loadbalancer-port-443: |
+      {
+        "protocol": "https",
+        "tls-secret-name": "my-tls-secret",
+        "proxy-protocol": "v2"
+      }
+```
+
+### Tags
+
+Add tags to the NodeBalancer:
+```yaml
+metadata:
+  annotations:
+    service.beta.kubernetes.io/linode-loadbalancer-tags: "production,web-tier"
+```
+
+## Related Documentation
+
+- [Service Annotations](annotations.md)
+- [Firewall Configuration](firewall.md)
+- [Session Affinity](session-affinity.md)
+- [Environment Variables](environment.md)
+- [Route Configuration](routes.md)
+- [Linode NodeBalancer Documentation](https://www.linode.com/docs/products/networking/nodebalancers/)
+- [Cilium BGP Documentation](https://docs.cilium.io/en/stable/network/bgp-control-plane/)
+- [Basic Service Examples](../examples/basic.md)
+- [Advanced Configuration Examples](../examples/advanced.md)
diff --git a/docs/configuration/nodes.md b/docs/configuration/nodes.md
new file mode 100644
index 00000000..62e308ef
--- /dev/null
+++ b/docs/configuration/nodes.md
@@ -0,0 +1,96 @@
+# Node Configuration
+
+## Overview
+
+The Node Controller in CCM manages node-specific configurations and lifecycle operations for Kubernetes nodes running on Linode instances.
+
+## Node Labels
+
+The CCM automatically adds the following labels to nodes:
+
+### Topology Labels
+Current:
+- `topology.kubernetes.io/region`: Linode region (e.g., "us-east")
+- `topology.kubernetes.io/zone`: Linode availability zone
+
+Legacy (deprecated):
+- `failure-domain.beta.kubernetes.io/region`: Linode region
+- `failure-domain.beta.kubernetes.io/zone`: Linode availability zone
+
+### Provider Labels
+- `node.kubernetes.io/instance-type`: Linode instance type (e.g., "g6-standard-4")
+
+## Node Annotations
+
+All node annotations must be prefixed with: `node.k8s.linode.com/`
+
+### Available Annotations
+
+| Annotation | Type | Default | Description |
+|------------|------|---------|-------------|
+| `private-ip` | IPv4 | none | Overrides default detection of Node InternalIP |
+
+### Use Cases
+
+#### Private Network Configuration
+```yaml
+apiVersion: v1
+kind: Node
+metadata:
+  name: my-node
+  annotations:
+    node.k8s.linode.com/private-ip: "192.168.1.100"
+```
+
+#### VPC Configuration
+When using the CCM with [Linode VPC](https://www.linode.com/docs/products/networking/vpc/), the node's InternalIP is set to its VPC IP address. To use a different IP address as the InternalIP, you may need to configure the node's InternalIP manually:
+```yaml
+apiVersion: v1
+kind: Node
+metadata:
+  name: vpc-node
+  annotations:
+    node.k8s.linode.com/private-ip: "10.0.0.5"
+```
+
+## Node Networking
+
+### Private Network Requirements
+- NodeBalancers require nodes to have Linode-specific [private IP addresses](https://techdocs.akamai.com/cloud-computing/docs/managing-ip-addresses-on-a-compute-instance#types-of-ip-addresses)
+- Private IPs must be configured in the Linode Cloud Manager or via the API
+- The CCM will use private IPs for inter-node communication
+
+### VPC Configuration
+When using VPC:
+1. 
Configure network interfaces in Linode Cloud Manager
+2. Add appropriate node annotations for private IPs
+3. Ensure proper routing configuration
+4. Configure security groups if needed
+
+For VPC routing setup, see [Route Configuration](routes.md).
+
+## Node Controller Behavior
+
+### Node Initialization
+- Configures node with Linode-specific information
+- Sets node addresses (public/private IPs)
+- Applies region/zone labels
+- Configures node hostnames
+
+### Node Lifecycle Management
+- Monitors node health
+- Updates node status
+- Handles node termination
+- Manages node cleanup
+
+### Node Updates
+- Updates node labels when region/zone changes
+- Updates node addresses when IP configuration changes
+- Maintains node conditions based on Linode instance status
+
+For more information:
+- [Linode Instance Types](https://www.linode.com/docs/products/compute/compute-instances/plans/)
+- [Private Networking](https://www.linode.com/docs/products/networking/private-networking/)
+- [VPC Documentation](https://www.linode.com/docs/products/networking/vpc/)
+- [Route Configuration](routes.md)
+- [Environment Variables](environment.md)
diff --git a/docs/configuration/routes.md b/docs/configuration/routes.md
new file mode 100644
index 00000000..0f8b8a26
--- /dev/null
+++ b/docs/configuration/routes.md
@@ -0,0 +1,100 @@
+# Route Configuration
+
+## Overview
+
+The Route Controller manages network routes for pod communication in VPC environments. It ensures proper connectivity between nodes and pods across the cluster by configuring routes in the Linode VPC.
+
+## Prerequisites
+
+- Kubernetes cluster running in Linode VPC
+- CCM with route controller enabled
+- Proper API permissions
+
+## Configuration
+
+### Enable Route Controller
+
+1. Via Helm chart in `values.yaml`:
+```yaml
+routeController:
+  vpcNames: "vpc-prod,vpc-staging"  # Comma-separated names of VPCs managed by CCM
+  clusterCIDR: "10.0.0.0/8"         # Pod CIDR range
+  configureCloudRoutes: true        # Enable route controller
+```
+
+2. Via command-line flags in the CCM deployment:
+```yaml
+spec:
+  template:
+    spec:
+      containers:
+        - name: ccm-linode
+          args:
+            - --configure-cloud-routes=true
+            - --vpc-names=vpc-prod,vpc-staging
+            - --cluster-cidr=10.0.0.0/8
+```
+
+### Environment Variables
+
+| Variable | Default | Description |
+|----------|---------|-------------|
+| `LINODE_ROUTES_CACHE_TTL_SECONDS` | `60` | Default TTL of the route cache in seconds |
+
+## Route Management
+
+### Automatic Operations
+
+The Route Controller:
+- Creates routes for pod CIDR ranges assigned to nodes
+- Updates routes when nodes are added/removed
+- Manages route tables in specified VPCs
+- Handles route cleanup during node removal
+- Maintains a route cache for performance
+
+### Route Types
+
+1. **Pod CIDR Routes**
+   - Created for each node's pod CIDR allocation
+   - Target is the node's private IP address
+   - Automatically managed based on node lifecycle
+
+2. **VPC Routes**
+   - Managed within specified VPCs
+   - Enables cross-node pod communication
+   - Automatically updated with topology changes
+
+## Best Practices
+
+### CIDR Planning
+- Ensure the pod CIDR range doesn't overlap with the VPC's IP address ranges
+- Plan for future cluster growth
+- Document CIDR allocations
+
+### VPC Configuration
+- Use clear, descriptive VPC names
+- Configure proper VPC security settings
+- Ensure proper API permissions
+
+## Troubleshooting
+
+### Common Issues
+
+1. 
**Route Creation Failures** + - Verify API permissions + - Check for CIDR conflicts + - Validate VPC configuration + - Ensure node private IPs are configured + +2. **Pod Communication Issues** + - Verify route table entries + - Check VPC network ACLs + - Validate node networking + - Confirm pod CIDR assignments + +## Related Documentation + +- [VPC Configuration](https://www.linode.com/docs/products/networking/vpc/) +- [Node Configuration](nodes.md) +- [Environment Variables](environment.md) +- [Kubernetes Network Policies](https://kubernetes.io/docs/concepts/services-networking/network-policies/) diff --git a/docs/configuration/session-affinity.md b/docs/configuration/session-affinity.md new file mode 100644 index 00000000..78683c90 --- /dev/null +++ b/docs/configuration/session-affinity.md @@ -0,0 +1,60 @@ +# Session Affinity + +## Overview + +Session affinity (also known as sticky sessions) ensures that requests from the same client are consistently routed to the same backend pod. In Kubernetes, sessionAffinity refers to a mechanism that allows a client to always be redirected to the same pod when the client hits a service. + +## Configuration + +### Basic Setup + +Enable session affinity by setting `service.spec.sessionAffinity` to `ClientIP`: +```yaml +apiVersion: v1 +kind: Service +metadata: + name: wordpress-lsmnl-wordpress + namespace: wordpress-lsmnl + labels: + app: wordpress-lsmnl-wordpress +spec: + type: LoadBalancer + selector: + app: wordpress-lsmnl-wordpress + sessionAffinity: ClientIP +``` + +### Setting Timeout + +Configure the maximum session sticky time using `sessionAffinityConfig`: +```yaml +apiVersion: v1 +kind: Service +metadata: + name: my-service +spec: + type: LoadBalancer + sessionAffinity: ClientIP + sessionAffinityConfig: + clientIP: + timeoutSeconds: 10800 # 3 hours +``` + +## Configuration Options + +### Session Affinity Types +- `None`: No session affinity (default) +- `ClientIP`: Route based on client's IP address. All requests from the same client IP will be directed to the same pod. + +### Timeout Configuration +- `timeoutSeconds`: Duration to maintain affinity +- Default: 10800 seconds (3 hours) +- Valid range: 1 to 86400 seconds (24 hours) +- After the timeout period, client requests may be routed to a different pod + +## Related Documentation + +- [Service Configuration](annotations.md) +- [LoadBalancer Configuration](loadbalancer.md) +- [Kubernetes Services Documentation](https://kubernetes.io/docs/concepts/services-networking/service/#session-affinity) +- [Service Selectors](https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service) diff --git a/docs/development/README.md b/docs/development/README.md new file mode 100644 index 00000000..552c1fb6 --- /dev/null +++ b/docs/development/README.md @@ -0,0 +1,121 @@ +# Development Guide + +## Prerequisites + +The Linode Cloud Controller Manager development requires: +- A fairly up-to-date GNU tools environment +- Go 1.23 or higher + +### Setting Up Development Environment + +#### Option 1: Using Devbox (Recommended) +The simplest way to set up your development environment is using [Devbox](https://www.jetpack.io/devbox/): + +1. Install Devbox by following the instructions at [jetpack.io/devbox/docs/installing_devbox/](https://www.jetpack.io/devbox/docs/installing_devbox/) + +2. Start the development environment: +```bash +devbox shell +``` + +This will automatically set up all required dependencies and tools for development. + +#### Option 2: Manual Setup + +1. 
If you haven't set up a Go development environment, follow [these instructions](https://golang.org/doc/install) to install Go.
+
+On macOS, you can use Homebrew:
+```bash
+brew install golang
+```
+
+## Getting Started
+
+### Download Source
+```bash
+go get github.com/linode/linode-cloud-controller-manager
+cd $(go env GOPATH)/src/github.com/linode/linode-cloud-controller-manager
+```
+
+### Building the Project
+
+#### Build Binary
+Use the following Make targets to build and run a local binary:
+
+```bash
+# Build the binary
+make build
+
+# Run the binary
+make run
+
+# You can also run the binary directly to pass additional args
+dist/linode-cloud-controller-manager
+```
+
+#### Building Docker Images
+To build and push a Docker image:
+
+```bash
+# Set the repo/image:tag with the IMG environment variable
+# Then run the docker-build make target
+IMG=linode/linode-cloud-controller-manager:canary make docker-build
+
+# Push Image
+IMG=linode/linode-cloud-controller-manager:canary make docker-push
+```
+
+To run the Docker image:
+```bash
+docker run -ti linode/linode-cloud-controller-manager:canary
+```
+
+### Managing Dependencies
+The Linode Cloud Controller Manager uses [Go Modules](https://blog.golang.org/using-go-modules) to manage dependencies.
+
+To update or add dependencies:
+```bash
+go mod tidy
+```
+
+## Development Guidelines
+
+### Code Quality Standards
+- Write correct, up-to-date, bug-free, fully functional, secure, and efficient code
+- Use the latest stable version of Go
+- Follow Go idioms and best practices
+- Implement proper error handling with custom error types when beneficial
+- Include comprehensive input validation
+- Utilize built-in language features for performance optimization
+- Follow relevant design patterns and principles
+- Leave NO todos, placeholders, or incomplete implementations
+
+### Code Structure
+- Include necessary imports and declarations
+- Implement proper logging using appropriate logging mechanisms
+- Consider implementing middleware or interceptors for cross-cutting concerns
+- Structure code in a modular and maintainable way
+- Use appropriate naming conventions and code organization
+
+### Security & Performance
+- Implement security best practices
+- Consider rate limiting when appropriate
+- Include authentication/authorization where needed
+- Optimize for performance while maintaining readability
+- Consider scalability in design decisions
+
+### Documentation & Testing
+- Provide brief comments for complex logic or language-specific idioms
+- Include clear documentation for public interfaces
+- Write tests using appropriate testing frameworks
+- Document any assumptions or limitations
+
+### Pull Request Process
+1. Ensure your code follows the project's coding standards
+2. Update documentation as needed
+3. Add or update tests as appropriate
+4. Make sure all tests pass locally
+5. Submit the PR with a clear description of the changes
+
+## Getting Help
+For development-related questions or discussions, join us in #linode on the [Kubernetes Slack](https://kubernetes.slack.com/messages/CD4B15LUR/details/).
\ No newline at end of file
diff --git a/docs/examples/README.md b/docs/examples/README.md
new file mode 100644
index 00000000..606e1589
--- /dev/null
+++ b/docs/examples/README.md
@@ -0,0 +1,23 @@
+# Examples
+
+This section provides working examples of common CCM configurations. Each example includes a complete service and deployment configuration.
+
+## Available Examples
+
+1. 
**[Basic Services](basic.md)** + - HTTP LoadBalancer + - HTTPS LoadBalancer with TLS termination + +2. **[Advanced Configuration](advanced.md)** + - Custom Health Checks + - Firewalled Services + - Session Affinity + - Shared IP Load-Balancing + - Custom Node Selection + +For testing these examples, see the [test script](https://github.com/linode/linode-cloud-controller-manager/blob/master/examples/test.sh). + +For more configuration options, see: +- [Service Annotations](../configuration/annotations.md) +- [LoadBalancer Configuration](../configuration/loadbalancer.md) +- [Firewall Configuration](../configuration/firewall.md) diff --git a/docs/examples/advanced.md b/docs/examples/advanced.md new file mode 100644 index 00000000..42fd8e64 --- /dev/null +++ b/docs/examples/advanced.md @@ -0,0 +1,140 @@ +# Advanced Configuration + +## Custom Health Checks + +Service with custom health check configuration: + +```yaml +apiVersion: v1 +kind: Service +metadata: + name: web-healthcheck + annotations: + service.beta.kubernetes.io/linode-loadbalancer-check-type: "http" + service.beta.kubernetes.io/linode-loadbalancer-check-path: "/healthz" + service.beta.kubernetes.io/linode-loadbalancer-check-interval: "5" + service.beta.kubernetes.io/linode-loadbalancer-check-timeout: "3" + service.beta.kubernetes.io/linode-loadbalancer-check-attempts: "2" + service.beta.kubernetes.io/linode-loadbalancer-check-passive: "true" +spec: + type: LoadBalancer + ports: + - port: 80 + selector: + app: web +``` + +## Firewalled Services + +Service with firewall rules: + +```yaml +kind: Service +apiVersion: v1 +metadata: + name: restricted-access + annotations: + service.beta.kubernetes.io/linode-loadbalancer-firewall-acl: | + { + "allowList": { + "ipv4": ["192.166.0.0/16", "172.23.41.0/24"], + "ipv6": ["2001:DB8::/128"] + } + } +spec: + type: LoadBalancer + selector: + app: restricted-app + ports: + - name: http + port: 80 + targetPort: 8080 +``` + +## Session Affinity + +Service with sticky sessions: + +```yaml +apiVersion: v1 +kind: Service +metadata: + name: sticky-service +spec: + type: LoadBalancer + sessionAffinity: ClientIP + sessionAffinityConfig: + clientIP: + timeoutSeconds: 100 + selector: + app: sticky-app + ports: + - port: 80 + targetPort: 8080 +``` + +## Shared IP Load-Balancing + +```yaml +apiVersion: v1 +kind: Service +metadata: + name: shared-ip-service +spec: + type: LoadBalancer + selector: + app: web + ports: + - port: 80 + targetPort: 8080 +--- +# Required DaemonSet configuration for shared IP +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: ccm-linode + namespace: kube-system +spec: + template: + spec: + containers: + - image: linode/linode-cloud-controller-manager:latest + name: ccm-linode + env: + - name: LINODE_URL + value: https://api.linode.com/v4 + args: + - --bgp-node-selector=cilium-bgp-peering=true + - --load-balancer-type=cilium-bgp + - --ip-holder-suffix=myclustername1 +``` + +## Custom Node Selection + +```yaml +apiVersion: v1 +kind: Service +metadata: + name: custom-nodes +spec: + type: LoadBalancer + selector: + app: custom-app + ports: + - port: 80 + # Only use nodes with specific labels + externalTrafficPolicy: Local +--- +# Example node with custom annotation +apiVersion: v1 +kind: Node +metadata: + name: custom-node + annotations: + node.k8s.linode.com/private-ip: "192.168.1.100" +``` + +For more examples, see: +- [Service Annotations](../configuration/annotations.md) +- [Firewall Configuration](../configuration/firewall.md) +- [LoadBalancer 
Configuration](../configuration/loadbalancer.md) diff --git a/docs/examples/basic.md b/docs/examples/basic.md new file mode 100644 index 00000000..d15ff070 --- /dev/null +++ b/docs/examples/basic.md @@ -0,0 +1,107 @@ +# Basic Services + +## HTTP LoadBalancer + +Basic HTTP LoadBalancer service with nginx: + +```yaml +kind: Service +apiVersion: v1 +metadata: + name: http-lb + annotations: + service.beta.kubernetes.io/linode-loadbalancer-default-protocol: "http" +spec: + type: LoadBalancer + selector: + app: nginx-http-example + ports: + - name: http + protocol: TCP + port: 80 + targetPort: 80 + +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: nginx-http-deployment +spec: + replicas: 2 + selector: + matchLabels: + app: nginx-http-example + template: + metadata: + labels: + app: nginx-http-example + spec: + containers: + - name: nginx + image: nginx + ports: + - containerPort: 80 + protocol: TCP +``` + +## HTTPS LoadBalancer + +HTTPS LoadBalancer with TLS termination: + +```yaml +kind: Service +apiVersion: v1 +metadata: + name: https-lb + annotations: + service.beta.kubernetes.io/linode-loadbalancer-throttle: "4" + service.beta.kubernetes.io/linode-loadbalancer-default-protocol: "http" + service.beta.kubernetes.io/linode-loadbalancer-port-443: | + { + "tls-secret-name": "example-secret", + "protocol": "https" + } +spec: + type: LoadBalancer + selector: + app: nginx-https-example + ports: + - name: http + protocol: TCP + port: 80 + targetPort: http + - name: https + protocol: TCP + port: 443 + targetPort: https + +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: nginx-https-deployment +spec: + replicas: 2 + selector: + matchLabels: + app: nginx-https-example + template: + metadata: + labels: + app: nginx-https-example + spec: + containers: + - name: nginx + image: nginx + ports: + - name: http + containerPort: 80 + protocol: TCP + - name: https + containerPort: 443 + protocol: TCP +``` + +For more configuration options, see: +- [Service Annotations](../configuration/annotations.md) +- [LoadBalancer Configuration](../configuration/loadbalancer.md) diff --git a/docs/getting-started/README.md b/docs/getting-started/README.md new file mode 100644 index 00000000..9850e6d3 --- /dev/null +++ b/docs/getting-started/README.md @@ -0,0 +1,13 @@ +# Getting Started + +This section will guide you through: +- Understanding the CCM's requirements +- Installing the CCM using either Helm or manual installation +- Verifying your installation +- Troubleshooting common issues + +Choose the installation method that best suits your needs: +- **Helm Installation**: Recommended for most users, provides easier upgrades and configuration +- **Manual Installation**: Offers more control over the deployment process + +Before proceeding with installation, make sure to review the requirements section to ensure your environment is properly configured. diff --git a/docs/getting-started/helm-installation.md b/docs/getting-started/helm-installation.md new file mode 100644 index 00000000..449166e6 --- /dev/null +++ b/docs/getting-started/helm-installation.md @@ -0,0 +1,61 @@ +# Helm Installation + +## Prerequisites +- Helm 3.x installed +- kubectl configured to access your cluster +- Linode API token +- Target region identified + +## Installation Steps + +1. Add the CCM Helm repository: +```bash +helm repo add ccm-linode https://linode.github.io/linode-cloud-controller-manager/ +helm repo update ccm-linode +``` + +2. 
Create a values file (values.yaml):
+```yaml
+apiToken: "your-api-token"
+region: "us-east"
+
+# Optional: Configure route controller
+routeController:
+  vpcNames: ""  # Comma-separated VPC names
+  clusterCIDR: "10.0.0.0/8"
+  configureCloudRoutes: true
+
+# Optional: Configure shared IP load balancing instead of NodeBalancers (requires Cilium CNI and BGP Control Plane enabled)
+sharedIPLoadBalancing:
+  loadBalancerType: cilium-bgp
+  bgpNodeSelector: cilium-bgp-peering=true
+  ipHolderSuffix: ""
+
+# Optional: Allow /metrics scraping without authorization on secure HTTPS port (10253 by default)
+allowUnauthorizedMetrics: true
+```
+
+3. Install the CCM:
+```bash
+helm install ccm-linode \
+  --namespace kube-system \
+  -f values.yaml \
+  ccm-linode/ccm-linode
+```
+
+## Upgrading
+
+To upgrade an existing installation:
+```bash
+helm upgrade ccm-linode \
+  --namespace kube-system \
+  -f values.yaml \
+  ccm-linode/ccm-linode
+```
+
+## Uninstalling
+
+To remove the CCM:
+```bash
+helm uninstall ccm-linode -n kube-system
+```
diff --git a/docs/getting-started/installation.md b/docs/getting-started/installation.md
new file mode 100644
index 00000000..ea47f6df
--- /dev/null
+++ b/docs/getting-started/installation.md
@@ -0,0 +1,20 @@
+# Installation
+
+The CCM can be installed using either Helm (recommended) or by manually applying manifests. Choose the method that best suits your needs:
+
+## Installation Methods
+
+### [Helm Installation](helm-installation.md)
+- Easier to manage and upgrade
+- Configurable through values.yaml
+- Supports templating for different environments
+
+### [Manual Installation](manual-installation.md)
+- More control over the deployment
+- Better for customized setups
+- Useful for understanding the components
+
+## Post-Installation
+After installing the CCM, proceed to the [Verification](verification.md) section to ensure everything is working correctly.
+
+If you encounter any issues, check the [Troubleshooting](troubleshooting.md) guide.
diff --git a/docs/getting-started/manual-installation.md b/docs/getting-started/manual-installation.md
new file mode 100644
index 00000000..7c4af09e
--- /dev/null
+++ b/docs/getting-started/manual-installation.md
@@ -0,0 +1,66 @@
+# Manual Installation
+
+## Prerequisites
+- kubectl configured to access your cluster
+- Linode API token
+- Target region identified
+
+## Installation Steps
+
+1. Generate the manifest:
+```bash
+./deploy/generate-manifest.sh $LINODE_API_TOKEN $REGION
+```
+
+2. Review the generated manifest:
+The script creates `ccm-linode.yaml` containing:
+- ServiceAccount
+- ClusterRole and ClusterRoleBinding
+- Secret with API token
+- DaemonSet for the CCM
+
+3. Apply the manifest:
+```bash
+kubectl apply -f ccm-linode.yaml
+```
+
+## Customization
+
+### Environment Variables
+You can modify the DaemonSet to include custom environment variables:
+```yaml
+env:
+  - name: LINODE_INSTANCE_CACHE_TTL
+    value: "15"
+  - name: LINODE_ROUTES_CACHE_TTL_SECONDS
+    value: "60"
+```
+
+### Resource Limits
+Adjust compute resources as needed:
+```yaml
+resources:
+  requests:
+    cpu: 100m
+    memory: 128Mi
+  limits:
+    cpu: 200m
+    memory: 256Mi
+```
+
+### Prometheus metrics
+
+The Cloud Controller Manager exposes metrics by default on the port given by
+the `--secure-port` flag. The endpoint is protected from unauthenticated access
+by default. To allow unauthenticated clients (`system:anonymous`) to access
+Prometheus metrics, use the `--authorization-always-allow-paths="/metrics"`
+command-line flag.
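+
+For example, a minimal sketch of adding this flag to the CCM DaemonSet (merge it with the args your deployment already uses):
+```yaml
+spec:
+  template:
+    spec:
+      containers:
+        - name: ccm-linode
+          args:
+            # Allow unauthenticated access to the /metrics path only
+            - --authorization-always-allow-paths=/metrics
+```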
+
+Linode API calls can be monitored using the `ccm_linode_client_requests_total` metric.
+
+## Uninstalling
+
+To remove the CCM:
+```bash
+kubectl delete -f ccm-linode.yaml
+```
diff --git a/docs/getting-started/overview.md b/docs/getting-started/overview.md
new file mode 100644
index 00000000..39fe3f0e
--- /dev/null
+++ b/docs/getting-started/overview.md
@@ -0,0 +1,34 @@
+# Overview
+
+The Linode Cloud Controller Manager provides several key features that enable a fully supported Kubernetes experience on Linode infrastructure.
+
+## Features
+
+### LoadBalancer Services
+- Automatic deployment and configuration of Linode NodeBalancers
+- Support for HTTP, HTTPS, and TCP traffic
+- SSL/TLS termination
+- Custom health checks and session affinity
+
+### Node Management
+- Automatic configuration of node hostnames and network addresses
+- Proper node state management for Linode shutdowns
+- Region-based node annotation for failure domain scheduling
+
+### Network Integration
+- Support for private networking
+- VPC and VLAN compatibility
+- BGP-based IP sharing capabilities
+
+### Security
+- Integrated firewall management
+- Support for TLS termination
+- Custom security rules and ACLs
+
+## When to Use CCM
+
+The Linode CCM is essential when:
+- Running Kubernetes clusters on Linode infrastructure
+- Requiring automated load balancer provisioning
+- Needing integrated cloud provider features
+- Managing multi-node clusters with complex networking requirements
\ No newline at end of file
diff --git a/docs/getting-started/requirements.md b/docs/getting-started/requirements.md
new file mode 100644
index 00000000..463d4bc3
--- /dev/null
+++ b/docs/getting-started/requirements.md
@@ -0,0 +1,54 @@
+# Requirements
+
+Before installing the Linode Cloud Controller Manager, ensure your environment meets the following requirements.
+
+## Kubernetes Cluster Requirements
+
+### Version Compatibility
+- Kubernetes version 1.22 or higher
+- Kubernetes cluster running on Linode infrastructure
+
+### Kubernetes Components Configuration
+The following Kubernetes components must be started with the `--cloud-provider=external` flag:
+- Kubelet
+- Kube Controller Manager
+- Kube API Server
+
+## Linode Requirements
+
+### API Token
+You need a Linode APIv4 Personal Access Token with the following scopes:
+- Linodes - Read/Write
+- NodeBalancers - Read/Write
+- IPs - Read/Write
+- Volumes - Read/Write
+- Firewalls - Read/Write (if using firewall features)
+- VPCs - Read/Write (if using VPC features)
+- VLANs - Read/Write (if using VLAN features)
+
+To create a token:
+1. Log into the [Linode Cloud Manager](https://cloud.linode.com)
+2. Go to your profile
+3. Select the "API Tokens" tab
+4. Click "Create a Personal Access Token"
+5. Select the required scopes
+6. Set an expiry (optional)
+
+### Region Support
+Your cluster must be in a [supported Linode region](https://api.linode.com/v4/regions).
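+
+For example, one way to list the current region IDs from that endpoint (assumes `curl` and `jq` are available):
+```bash
+curl -s https://api.linode.com/v4/regions | jq -r '.data[].id'
+```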
+
+## Network Requirements
+
+### Private Networking
+- If using NodeBalancers, nodes must have private IP addresses
+- VPC or VLAN configurations require additional network configuration
+
+### Firewall Considerations
+- Ensure required ports are open for Kubernetes components
+- If using Cloud Firewalls, ensure the API token has firewall management permissions
+
+## Resource Quotas
+Ensure your Linode account has sufficient quota for:
+- NodeBalancers (if using LoadBalancer services)
+- Additional IP addresses (if using shared IP features)
+- Cloud Firewalls (if using firewall features)
diff --git a/docs/getting-started/troubleshooting.md b/docs/getting-started/troubleshooting.md
new file mode 100644
index 00000000..5e613f8d
--- /dev/null
+++ b/docs/getting-started/troubleshooting.md
@@ -0,0 +1,96 @@
+# Troubleshooting
+
+## Common Issues and Solutions
+
+### CCM Pod Issues
+
+#### Pod Won't Start
+```bash
+kubectl get pods -n kube-system -l app=ccm-linode
+kubectl describe pod -n kube-system -l app=ccm-linode
+```
+
+Common causes:
+- Invalid API token
+- Missing RBAC permissions
+- Resource constraints
+
+#### Pod Crashes
+Check the logs:
+```bash
+kubectl logs -n kube-system -l app=ccm-linode
+```
+
+Common causes:
+- API rate limiting
+- Network connectivity issues
+- Configuration errors
+
+### LoadBalancer Service Issues
+
+#### Service Stuck in Pending
+```bash
+kubectl describe service <service-name>
+```
+
+Check for:
+- API token permissions
+- NodeBalancer quota limits
+- Network configuration
+
+#### Health Checks Failing
+Verify:
+- Backend pod health
+- Service port configuration
+- Health check path configuration
+
+### Node Issues
+
+#### Missing Node Labels
+```bash
+kubectl get nodes --show-labels
+```
+
+Verify:
+- CCM node controller logs
+- Node annotations
+- API permissions
+
+#### Network Problems
+Check:
+- Private IP configuration
+- VPC/VLAN setup
+- Firewall rules
+
+## Gathering Information
+
+### Useful Commands
+```bash
+# Get CCM version
+kubectl get pods -n kube-system -l app=ccm-linode -o jsonpath='{.items[0].spec.containers[0].image}'
+
+# Check events
+kubectl get events -n kube-system
+
+# Get CCM logs with timestamps
+kubectl logs -n kube-system -l app=ccm-linode --timestamps
+```
+
+### Debug Mode
+Set the following environment variable in the CCM deployment:
+```yaml
+env:
+  - name: LINODE_DEBUG
+    value: "1"
+```
+
+## Getting Help
+
+If issues persist:
+1. Join #linode on [Kubernetes Slack](https://kubernetes.slack.com)
+2. Check [GitHub Issues](https://github.com/linode/linode-cloud-controller-manager/issues)
+3. Submit a new issue with:
+   - CCM version
+   - Kubernetes version
+   - Relevant logs
+   - Steps to reproduce
diff --git a/docs/getting-started/verification.md b/docs/getting-started/verification.md
new file mode 100644
index 00000000..22e7c046
--- /dev/null
+++ b/docs/getting-started/verification.md
@@ -0,0 +1,63 @@
+# Verification
+
+After installing the CCM, follow these steps to verify it's working correctly.
+
+## Check CCM Pod Status
+
+1. Verify the CCM pods are running:
+```bash
+kubectl get pods -n kube-system -l app=ccm-linode
+```
+
+Expected output:
+```
+NAME               READY   STATUS    RESTARTS   AGE
+ccm-linode-xxxxx   1/1     Running   0          2m
+```
+
+2. Check CCM logs:
+```bash
+kubectl logs -n kube-system -l app=ccm-linode
+```
+
+Look for successful initialization messages and no errors.
+
+## Verify Node Configuration
+
+1. 
Check node annotations: +```bash +kubectl get nodes -o yaml +``` + +Look for: +- Proper region labels +- Node addresses +- Provider ID + +## Test LoadBalancer Service + +1. Create a test service: +```yaml +apiVersion: v1 +kind: Service +metadata: + name: test-lb +spec: + type: LoadBalancer + ports: + - port: 80 + selector: + app: test +``` + +2. Verify NodeBalancer creation: +```bash +kubectl get svc test-lb +``` + +The service should receive an external IP address. + +## Common Issues +- Pods in CrashLoopBackOff: Check logs for API token or permissions issues +- Service stuck in 'Pending': Verify API token has NodeBalancer permissions +- Missing node annotations: Check CCM logs for node controller issues diff --git a/e2e/Makefile b/e2e/Makefile deleted file mode 100644 index a7218b20..00000000 --- a/e2e/Makefile +++ /dev/null @@ -1,34 +0,0 @@ -IMG ?= linode/linode-cloud-controller-manager:latest -GINKGO_PATH := $(shell go env GOPATH)/bin/ginkgo - -REUSED_KUBECONFIG := test/ccm-linode-for-reuse.conf - -ifneq ("$(wildcard $(REUSED_KUBECONFIG))","") - CONTROL_PLANE_IP := $(shell grep -E -o "([0-9]{1,3}[\.]){3}[0-9]{1,3}" $(REUSED_KUBECONFIG)) -endif - -.PHONY: test reuse-and-test clean - -${GINKGO_PATH}: - go install github.com/onsi/ginkgo/v2/ginkgo@v2.13.2 - -reuse-and-test: SUITE_ARGS='--reuse' - -test reuse-and-test: ${GINKGO_PATH} check-token - go list -m; \ - ginkgo -r --vv --trace $(TEST_ARGS) -- --image=${IMG} $(SUITE_ARGS) - -clean: check-token - cd test; \ - ./scripts/delete_cluster.sh ccm-linode-for-reuse; \ - rm terraform.tfstate; \ - rm -rf terraform.tfstate.d - -check-token: - @if test "$(LINODE_API_TOKEN)" = "" ; then \ - echo "LINODE_API_TOKEN must be set"; \ - exit 1; \ - fi - -control-plane-ssh: $(REUSED_KUBECONFIG) - ssh root@$(CONTROL_PLANE_IP) diff --git a/e2e/README.md b/e2e/README.md deleted file mode 100644 index 15080aa0..00000000 --- a/e2e/README.md +++ /dev/null @@ -1,4 +0,0 @@ -## How to run these End-to-end (e2e) tests - -TBD: the way we run these e2e tests has recently changed, there is currently -no convenient way to run these with no pre-existing clusters. 
diff --git a/e2e/bgp-test/lb-cilium-bgp/chainsaw-test.yaml b/e2e/bgp-test/lb-cilium-bgp/chainsaw-test.yaml new file mode 100644 index 00000000..979bcac6 --- /dev/null +++ b/e2e/bgp-test/lb-cilium-bgp/chainsaw-test.yaml @@ -0,0 +1,139 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + name: cilium-bgp-test +spec: + namespace: "cilium-bgp-test" + steps: + - name: Check if CCM is deployed + try: + - assert: + file: ../../test/assert-ccm-resources.yaml + - name: Create a pod and service with load balancer type cilium-bgp + try: + - apply: + file: create-pod-service.yaml + catch: + - describe: + apiVersion: v1 + kind: Pod + - describe: + apiVersion: v1 + kind: Service + - name: Verify CiliumLoadBalancerIPPool creation + try: + - assert: + resource: + apiVersion: cilium.io/v2alpha1 + kind: CiliumLoadBalancerIPPool + metadata: + name: cilium-bgp-test-test-bgp-svc-pool + spec: + disabled: false + - name: Verify CiliumBGPPeeringPolicy + try: + - assert: + resource: + apiVersion: cilium.io/v2alpha1 + kind: CiliumBGPPeeringPolicy + metadata: + name: linode-ccm-bgp-peering + spec: + nodeSelector: + matchLabels: + cilium-bgp-peering: "true" + - name: Check LoadBalancer IP assignment + try: + - assert: + resource: + apiVersion: v1 + kind: Service + metadata: + name: test-bgp-svc + status: + conditions: + - status: "True" + type: cilium.io/IPAMRequestSatisfied + - name: Verify IP sharing on labeled nodes + try: + - script: + content: | + set -e + + delete_nanode() { + local NANODE_RESPONSE=$(curl -s -H "Authorization: Bearer $LINODE_TOKEN" \ + "https://api.linode.com/v4/linode/instances") + + local NANODE_ID=$(echo "$NANODE_RESPONSE" | \ + jq -r --arg cluster "$CLUSTER_NAME" '.data[] | select(.label | endswith($cluster)) | .id') + + if [ -n "$NANODE_ID" ]; then + curl -s -X DELETE -H "Authorization: Bearer $LINODE_TOKEN" \ + "https://api.linode.com/v4/linode/instances/$NANODE_ID" || true + fi + } + + # Get the LoadBalancer IP + LB_IP=$(kubectl get svc test-bgp-svc -n cilium-bgp-test -o jsonpath='{.status.loadBalancer.ingress[0].ip}') + + # Get nodes with BGP label + BGP_NODES=$(kubectl get nodes -l cilium-bgp-peering=true -o name) + + if [ -z "$BGP_NODES" ]; then + echo "No nodes found with label cilium-bgp-peering=true" + delete_nanode + exit 1 + fi + + # Check if IP is shared on each BGP node + for node in $BGP_NODES; do + NODE_ID=$(kubectl get $node -o jsonpath='{.spec.providerID}' | sed 's|linode://||') + echo "Node ID: $NODE_ID" + + NODE_IP_RESPONSE=$(curl -s -H "Authorization: Bearer $LINODE_TOKEN" \ + "https://api.linode.com/v4/linode/instances/$NODE_ID/ips") + + SHARED_IPS=$(echo "$NODE_IP_RESPONSE" | jq -r '.ipv4.shared[]?.address // empty') + echo "shared IPs: $SHARED_IPS" + + if [ -n "$SHARED_IPS" ] && ! 
echo "$SHARED_IPS" | grep -q "$LB_IP"; then + echo "LoadBalancer IP $LB_IP not found in shared IPs of node $node" + delete_nanode + exit 1 + fi + done + + # Check if the nanode has the shared IP + NANODE_RESPONSE=$(curl -s -H "Authorization: Bearer $LINODE_TOKEN" \ + "https://api.linode.com/v4/linode/instances") + + NANODE_ID=$(echo "$NANODE_RESPONSE" | \ + jq -r --arg cluster "$CLUSTER_NAME" '.data[] | select(.label | endswith($cluster)) | .id') + + if [ -z "$NANODE_ID" ]; then + echo "No nanode found for cluster $CLUSTER_NAME" + exit 0 + fi + + NANODE_IP_RESPONSE=$(curl -s -H "Authorization: Bearer $LINODE_TOKEN" \ + "https://api.linode.com/v4/linode/instances/$NANODE_ID/ips") + + NANODE_IPS=$(echo "$NANODE_IP_RESPONSE" | jq -r '.ipv4.public[]?.address // empty') + + if [ -n "$NANODE_IPS" ] && ! echo "$NANODE_IPS" | grep -q "$LB_IP"; then + echo "LoadBalancer IP not found in nanode IPs" + delete_nanode + exit 1 + fi + + echo "Successfully found LoadBalancer IP in nanode IPs" + + # Delete the nanode on success + delete_nanode + check: + ($error == null): true + (contains($stdout, 'LoadBalancer IP not found in shared IPs of node')): false + (contains($stdout, 'LoadBalancer IP not found in nanode IPs')): false + (contains($stdout, 'Successfully found LoadBalancer IP in nanode IPs')): true + diff --git a/e2e/bgp-test/lb-cilium-bgp/create-pod-service.yaml b/e2e/bgp-test/lb-cilium-bgp/create-pod-service.yaml new file mode 100644 index 00000000..b2f96238 --- /dev/null +++ b/e2e/bgp-test/lb-cilium-bgp/create-pod-service.yaml @@ -0,0 +1,24 @@ +apiVersion: v1 +kind: Pod +metadata: + name: test-pod-1 + labels: + app: test-bgp +spec: + containers: + - name: nginx + image: nginx:latest + ports: + - containerPort: 80 +--- +apiVersion: v1 +kind: Service +metadata: + name: test-bgp-svc +spec: + type: LoadBalancer + ports: + - port: 80 + targetPort: 80 + selector: + app: test-bgp diff --git a/e2e/go.mod b/e2e/go.mod deleted file mode 100644 index 1b4b2e84..00000000 --- a/e2e/go.mod +++ /dev/null @@ -1,76 +0,0 @@ -module e2e_test - -go 1.20 - -require ( - github.com/appscode/go v0.0.0-20200323182826-54e98e09185a - github.com/linode/linodego v1.26.0 - github.com/onsi/ginkgo/v2 v2.13.2 - github.com/onsi/gomega v1.30.0 - k8s.io/api v0.23.17 - k8s.io/apimachinery v0.23.17 - k8s.io/client-go v0.23.17 -) - -require ( - github.com/davecgh/go-spew v1.1.1 // indirect - github.com/go-logr/logr v1.3.0 // indirect - github.com/go-resty/resty/v2 v2.9.1 // indirect - github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect - github.com/gogo/protobuf v1.3.2 // indirect - github.com/golang/protobuf v1.5.3 // indirect - github.com/google/go-cmp v0.6.0 // indirect - github.com/google/gofuzz v1.1.0 // indirect - github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38 // indirect - github.com/googleapis/gnostic v0.5.5 // indirect - github.com/imdario/mergo v0.3.5 // indirect - github.com/json-iterator/go v1.1.12 // indirect - github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect - github.com/modern-go/reflect2 v1.0.2 // indirect - github.com/spf13/pflag v1.0.5 // indirect - golang.org/x/net v0.19.0 // indirect - golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 // indirect - golang.org/x/sys v0.15.0 // indirect - golang.org/x/term v0.15.0 // indirect - golang.org/x/text v0.14.0 // indirect - golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 // indirect - golang.org/x/tools v0.14.0 // indirect - google.golang.org/appengine v1.6.7 // indirect - google.golang.org/protobuf 
v1.28.0 // indirect - gopkg.in/inf.v0 v0.9.1 // indirect - gopkg.in/ini.v1 v1.66.6 // indirect - gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect - gopkg.in/yaml.v2 v2.4.0 // indirect - gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/klog/v2 v2.30.0 // indirect - k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65 // indirect - k8s.io/utils v0.0.0-20211116205334-6203023598ed // indirect - sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6 // indirect - sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect - sigs.k8s.io/yaml v1.2.0 // indirect -) - -replace ( - k8s.io/api => k8s.io/api v0.23.17 - k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.23.17 - k8s.io/apimachinery => k8s.io/apimachinery v0.23.17 - k8s.io/apiserver => k8s.io/apiserver v0.23.17 - k8s.io/cli-runtime => k8s.io/cli-runtime v0.23.17 - k8s.io/client-go => k8s.io/client-go v0.23.17 - k8s.io/cloud-provider => k8s.io/cloud-provider v0.23.17 - k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.23.17 - k8s.io/code-generator => k8s.io/code-generator v0.23.17 - k8s.io/component-base => k8s.io/component-base v0.23.17 - k8s.io/cri-api => k8s.io/cri-api v0.23.17 - k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.23.17 - k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.23.17 - k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.23.17 - k8s.io/kube-proxy => k8s.io/kube-proxy v0.23.17 - k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.23.17 - k8s.io/kubectl => k8s.io/kubectl v0.23.17 - k8s.io/kubelet => k8s.io/kubelet v0.23.17 - k8s.io/kubernetes => k8s.io/kubernetes v0.23.17 - k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.23.17 - k8s.io/metrics => k8s.io/metrics v0.23.17 - k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.23.17 -) diff --git a/e2e/go.sum b/e2e/go.sum deleted file mode 100644 index e61bda00..00000000 --- a/e2e/go.sum +++ /dev/null @@ -1,574 +0,0 @@ -cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= -cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= -cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= -cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= -cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= -cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= -cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= -cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= -cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= -cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= -cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= -cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= -cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= -cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= -cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= -cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= -cloud.google.com/go/bigquery v1.5.0/go.mod 
h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= -cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= -cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= -cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= -cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= -cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= -cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= -cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= -cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= -cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= -cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= -cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= -cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= -cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= -dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= -github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= -github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= -github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= -github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= -github.com/appscode/go v0.0.0-20200323182826-54e98e09185a h1:cZ80NKoLRaW1PVCWXAJE+YFkBAmLZ8BnrJmH0ClY1Gs= -github.com/appscode/go v0.0.0-20200323182826-54e98e09185a/go.mod h1:lIcm8Z6VPuvcw/a3EeOWcG6R3I13iHMLYbtVP7TKufY= -github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= -github.com/beevik/ntp v0.2.0/go.mod h1:hIHWr+l3+/clUnF44zdK+CWW7fO8dR5cIylAQ76NRpg= -github.com/cenkalti/backoff v2.1.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= -github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= -github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= -github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= -github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0/go.mod h1:4Zcjuz89kmFXt9morQgcfYZAYZ5n8WHjt81YYWIwtTM= -github.com/codeskyblue/go-sh v0.0.0-20190412065543-76bd3d59ff27/go.mod h1:VQx0hjo2oUeQkQUET7wRwradO6f+fN5jzXgB/zROxxE= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= -github.com/davecgh/go-spew v1.1.1/go.mod 
h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= -github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= -github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= -github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= -github.com/getkin/kin-openapi v0.76.0/go.mod h1:660oXbgy5JFMKreazJaQTw7o+X00qeSyhcnluiMv+Xg= -github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= -github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= -github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= -github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= -github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= -github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= -github.com/go-resty/resty/v2 v2.9.1 h1:PIgGx4VrHvag0juCJ4dDv3MiFRlDmP0vicBucwf+gLM= -github.com/go-resty/resty/v2 v2.9.1/go.mod h1:4/GYJVjh9nhkhGR6AUNW3XhpDYNUr+Uvy9gV/VGZIy4= -github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= -github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= -github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= -github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.2.0/go.mod 
h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= -github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= -github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= -github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= -github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= -github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= -github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= -github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= -github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= -github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= -github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= -github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= -github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/martian 
v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= -github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38 h1:yAJXTCF9TqKcTiHJAE8dj7HMvPfh66eeA2JYW7eFpSE= -github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= -github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= -github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= -github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU= -github.com/googleapis/gnostic v0.5.5 h1:9fHAtK0uDfpveeqqo1hkEZJcFvYXAiCN3UutL8F9xHw= -github.com/googleapis/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA= -github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= -github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= -github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/howeyc/gopass v0.0.0-20170109162249-bf9dde6d0d2c/go.mod h1:lADxMC39cJJqL93Duh1xhAs4I2Zs8mKS89XWXFGp9cs= -github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/imdario/mergo v0.3.5 h1:JboBksRwiiAJWvIYJVo46AfV+IAIKZpfrSzVKj42R4Q= -github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= -github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= -github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= -github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= -github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= -github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= -github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= -github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= 
-github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= -github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= -github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= -github.com/linode/linodego v1.26.0 h1:2tOZ3Wxn4YvGBRgZi3Vz6dab+L16XUntJ9sJxh3ZBio= -github.com/linode/linodego v1.26.0/go.mod h1:kD7Bf1piWg/AXb9TA0ThAVwzR+GPf6r2PvbTbVk7PMA= -github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= -github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= -github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= -github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= -github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= -github.com/moul/http2curl v1.0.0/go.mod h1:8UbvGypXm98wA/IqH45anm5Y2Z6ep6O31QGOAZ3H0fQ= -github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= -github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= -github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78= -github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.14.0 h1:2mOpI4JVVPBN+WQRa0WKH2eXR+Ey+uK4n7Zj0aYpIQA= -github.com/onsi/ginkgo/v2 v2.13.2 h1:Bi2gGVkfn6gQcjNjZJVO8Gf0FHzMPf2phUei9tejVMs= -github.com/onsi/ginkgo/v2 v2.13.2/go.mod h1:XStQ8QcGwLyF4HdfcZB8SFOS/MWCgDuXMSBe6zrvLgM= -github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= -github.com/onsi/gomega v1.30.0 h1:hvMK7xYz4D3HapigLTeGdId/NcfQx1VHMJc60ew99+8= -github.com/onsi/gomega v1.30.0/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ= -github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod 
h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= -github.com/smartystreets/goconvey v0.0.0-20190731233626-505e41936337/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= -github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= -github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= -github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= -github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= -github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= -github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= -github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= -go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= -go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= -go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190422183909-d864b10871cd/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc= -golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= -golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= -golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod 
h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= -golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= -golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= -golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= -golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= -golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= -golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= -golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= -golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= -golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.13.0 h1:I/DsJXRlw/8l/0c24sM9yb0T4z9liZTduXvdAWYiysY= -golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod 
h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= -golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= -golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk= -golang.org/x/net v0.19.0 h1:zTwKpTd2XuCqf8huc7Fo2iSy+4RHPd10s4KzeTnVr1c= -golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U= -golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod 
h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 h1:RerP+noqYHUQ8CMRcPlC2nvTa4dcBIjegkuWdcUDuqg= -golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc= -golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= -golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= -golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU= -golang.org/x/term v0.15.0 h1:y/Oo/a/q3IXu26lQgl04j/gjuBDOBlx7X6Om1j2CPW4= -golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0= -golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= -golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.9.0/go.mod 
h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= -golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= -golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= -golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20211116232009-f0f3c7e86c11/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 h1:vVKdlvoWBphwdxWKrFZEuM0kGgGLxUOYcY4U/2Vjg44= -golang.org/x/time v0.0.0-20220210224613-90d013bbcef8/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= -golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod 
h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= -golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= -golang.org/x/tools v0.14.0 h1:jvNa2pY0M4r62jkRQ6RwEZZyPcymeL9XZMLBbV7U2nc= -golang.org/x/tools v0.14.0/go.mod h1:uYBEerGOWcJyEORxN+Ek8+TT266gXkNlHdJBwexUsBg= -golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -gomodules.xyz/version v0.1.0/go.mod h1:Y8xuV02mL/45psyPKG3NCVOwvAOy6T5Kx0l3rCjKSjU= -google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= -google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= -google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api 
v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= -google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= -google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= -google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= -google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= -google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= -google.golang.org/genproto 
v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= -google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= -google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= -google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= -google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= -google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= -google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= -google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= -google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= -google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= -google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= -google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= -google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= 
-google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= -google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= -google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.28.0 h1:w43yiav+6bVFTBQFZX0r7ipe9JQ1QsbMgHwbBziscLw= -google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU= -gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= -gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= -gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= -gopkg.in/ini.v1 v1.66.6 h1:LATuAqN/shcYAOkv3wl2L4rkaKqkcgTBQjOyYDvcPKI= -gopkg.in/ini.v1 v1.66.6/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= -gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= -gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= -gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= -honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -k8s.io/api v0.23.17 h1:gC11V5AIsNXUUa/xd5RQo7djukvl5O1ZDQKwEYu0H7g= -k8s.io/api v0.23.17/go.mod 
h1:upM9VIzXUjEyLTmGGi0KnH8kdlPnvgv+fEJ3tggDHfE= -k8s.io/apimachinery v0.23.17 h1:ipJ0SrpI6EzH8zVw0WhCBldgJhzIamiYIumSGTdFExY= -k8s.io/apimachinery v0.23.17/go.mod h1:87v5Wl9qpHbnapX1PSNgln4oO3dlyjAU3NSIwNhT4Lo= -k8s.io/client-go v0.23.17 h1:MbW05RO5sy+TFw2ds36SDdNSkJbr8DFVaaVrClSA8Vs= -k8s.io/client-go v0.23.17/go.mod h1:X5yz7nbJHS7q8977AKn8BWKgxeAXjl1sFsgstczUsCM= -k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= -k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= -k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= -k8s.io/klog/v2 v2.30.0 h1:bUO6drIvCIsvZ/XFgfxoGFQU/a4Qkh0iAlvUR7vlHJw= -k8s.io/klog/v2 v2.30.0/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= -k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65 h1:E3J9oCLlaobFUqsjG9DfKbP2BmgwBL2p7pn0A3dG9W4= -k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65/go.mod h1:sX9MT8g7NVZM5lVL/j8QyCCJe8YSMW30QvGZWaCIDIk= -k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -k8s.io/utils v0.0.0-20211116205334-6203023598ed h1:ck1fRPWPJWsMd8ZRFsWc6mh/zHp5fZ/shhbrgPUxDAE= -k8s.io/utils v0.0.0-20211116205334-6203023598ed/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= -rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= -rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= -sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6 h1:fD1pz4yfdADVNfFmcP2aBEtudwUQ1AlLnRBALr33v3s= -sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6/go.mod h1:p4QtZmO4uMYipTQNzagwnNoseA6OxSUutVw05NhYDRs= -sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= -sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE= -sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E= -sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= -sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q= -sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= diff --git a/e2e/setup/cilium-setup.sh b/e2e/setup/cilium-setup.sh new file mode 100755 index 00000000..9e8a7afb --- /dev/null +++ b/e2e/setup/cilium-setup.sh @@ -0,0 +1,32 @@ +#!/bin/bash +set -euo pipefail + +# Add bgp peering label to non control plane nodes. 
Needed to update the shared IP on the nodes +kubectl get nodes --no-headers | grep -v control-plane |\ + awk '{print $1}' | xargs -I {} kubectl label nodes {} cilium-bgp-peering=true --overwrite + +# Add RBAC permissions +kubectl patch clusterrole ccm-linode-clusterrole --type='json' -p='[{ + "op": "add", + "path": "/rules/-", + "value": { + "apiGroups": ["cilium.io"], + "resources": ["ciliumloadbalancerippools", "ciliumbgppeeringpolicies"], + "verbs": ["get", "list", "watch", "create", "update", "patch", "delete"] + } +}]' + +# Patch DaemonSet +kubectl patch daemonset ccm-linode -n kube-system --type='json' -p='[{ + "op": "add", + "path": "/spec/template/spec/containers/0/args/-", + "value": "--bgp-node-selector=cilium-bgp-peering=true" +}, { + "op": "add", + "path": "/spec/template/spec/containers/0/args/-", + "value": "--load-balancer-type=cilium-bgp" +}, { + "op": "add", + "path": "/spec/template/spec/containers/0/args/-", + "value": "--ip-holder-suffix='"${CLUSTER_SUFFIX}"'" +}]' diff --git a/e2e/setup/ctlptl-config.yaml b/e2e/setup/ctlptl-config.yaml new file mode 100644 index 00000000..6d9570a2 --- /dev/null +++ b/e2e/setup/ctlptl-config.yaml @@ -0,0 +1,9 @@ +--- +apiVersion: ctlptl.dev/v1alpha1 +kind: Cluster +product: kind +kindV1Alpha4Cluster: + name: caplccm + nodes: + - role: control-plane + image: kindest/node:v1.31.2 diff --git a/e2e/test/.gitignore b/e2e/test/.gitignore deleted file mode 100644 index ac582049..00000000 --- a/e2e/test/.gitignore +++ /dev/null @@ -1,5 +0,0 @@ -# dont commit temporary files written here by Terraform -*.conf -cluster.tf -terraform* -.terraform diff --git a/e2e/test/assert-ccm-resources.yaml b/e2e/test/assert-ccm-resources.yaml new file mode 100644 index 00000000..4d7d87d6 --- /dev/null +++ b/e2e/test/assert-ccm-resources.yaml @@ -0,0 +1,8 @@ +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: ccm-linode + namespace: kube-system +status: + numberAvailable: 1 + numberReady: 1 diff --git a/e2e/test/ccm_e2e_test.go b/e2e/test/ccm_e2e_test.go deleted file mode 100644 index d5ce41ba..00000000 --- a/e2e/test/ccm_e2e_test.go +++ /dev/null @@ -1,1398 +0,0 @@ -package test - -import ( - "context" - "e2e_test/test/framework" - "fmt" - "os/exec" - "strconv" - "time" - - "k8s.io/apimachinery/pkg/api/errors" - - "github.com/linode/linodego" - . "github.com/onsi/ginkgo/v2" - . 
"github.com/onsi/gomega" - "github.com/onsi/gomega/types" - core "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/util/intstr" - "k8s.io/apimachinery/pkg/watch" -) - -func EnsuredService() types.GomegaMatcher { - return And( - WithTransform(func(e watch.Event) (string, error) { - event, ok := e.Object.(*core.Event) - if !ok { - return "", fmt.Errorf("failed to poll event") - } - return event.Reason, nil - }, Equal("EnsuredLoadBalancer")), - ) -} - -var _ = Describe("e2e tests", func() { - var ( - err error - f *framework.Invocation - workers []string - ) - - const ( - annLinodeProxyProtocolDeprecated = "service.beta.kubernetes.io/linode-loadbalancer-proxy-protocol" - annLinodeDefaultProxyProtocol = "service.beta.kubernetes.io/linode-loadbalancer-default-proxy-protocol" - annLinodeDefaultProtocol = "service.beta.kubernetes.io/linode-loadbalancer-default-protocol" - annLinodePortConfigPrefix = "service.beta.kubernetes.io/linode-loadbalancer-port-" - annLinodeLoadBalancerPreserve = "service.beta.kubernetes.io/linode-loadbalancer-preserve" - annLinodeHealthCheckType = "service.beta.kubernetes.io/linode-loadbalancer-check-type" - annLinodeCheckBody = "service.beta.kubernetes.io/linode-loadbalancer-check-body" - annLinodeCheckPath = "service.beta.kubernetes.io/linode-loadbalancer-check-path" - annLinodeHealthCheckInterval = "service.beta.kubernetes.io/linode-loadbalancer-check-interval" - annLinodeHealthCheckTimeout = "service.beta.kubernetes.io/linode-loadbalancer-check-timeout" - annLinodeHealthCheckAttempts = "service.beta.kubernetes.io/linode-loadbalancer-check-attempts" - annLinodeHealthCheckPassive = "service.beta.kubernetes.io/linode-loadbalancer-check-passive" - annLinodeNodeBalancerID = "service.beta.kubernetes.io/linode-loadbalancer-nodebalancer-id" - annLinodeHostnameOnlyIngress = "service.beta.kubernetes.io/linode-loadbalancer-hostname-only-ingress" - ) - - BeforeEach(func() { - f = root.Invoke() - workers, err = f.GetNodeList() - Expect(err).NotTo(HaveOccurred()) - Expect(len(workers)).Should(BeNumerically(">=", 2)) - }) - - createPodWithLabel := func(pods []string, ports []core.ContainerPort, image string, labels map[string]string, selectNode bool) { - for i, pod := range pods { - p := f.LoadBalancer.GetPodObject(pod, image, ports, labels) - if selectNode { - p = f.LoadBalancer.SetNodeSelector(p, workers[i]) - } - Expect(f.LoadBalancer.CreatePod(p)).ToNot(BeNil()) - Eventually(f.LoadBalancer.GetPod).WithArguments(p.ObjectMeta.Name, f.LoadBalancer.Namespace()).Should(HaveField("Status.Phase", Equal(core.PodRunning))) - } - } - - deletePods := func(pods []string) { - for _, pod := range pods { - Expect(f.LoadBalancer.DeletePod(pod)).NotTo(HaveOccurred()) - } - } - - deleteService := func() { - Expect(f.LoadBalancer.DeleteService()).NotTo(HaveOccurred()) - } - - deleteSecret := func(name string) { - Expect(f.LoadBalancer.DeleteSecret(name)).NotTo(HaveOccurred()) - } - - ensureServiceLoadBalancer := func() { - watcher, err := f.LoadBalancer.GetServiceWatcher() - Expect(err).NotTo(HaveOccurred()) - Eventually(watcher.ResultChan()).Should(Receive(EnsuredService())) - } - - ensureServiceWasDeleted := func() { - err := func() error { - _, err := f.LoadBalancer.GetService() - return err - } - Eventually(err).WithTimeout(10 * time.Second).Should(MatchError(errors.IsNotFound, "IsNotFound")) - } - - createServiceWithSelector := func(selector map[string]string, ports []core.ServicePort, isSessionAffinityClientIP bool) { - Expect(f.LoadBalancer.CreateService(selector, nil, ports, 
isSessionAffinityClientIP)).NotTo(HaveOccurred()) - Eventually(f.LoadBalancer.GetServiceEndpoints).Should(Not(BeEmpty())) - ensureServiceLoadBalancer() - } - - createServiceWithAnnotations := func(labels, annotations map[string]string, ports []core.ServicePort, isSessionAffinityClientIP bool) { - Expect(f.LoadBalancer.CreateService(labels, annotations, ports, isSessionAffinityClientIP)).NotTo(HaveOccurred()) - Eventually(f.LoadBalancer.GetServiceEndpoints).Should(Not(BeEmpty())) - ensureServiceLoadBalancer() - } - - updateServiceWithAnnotations := func(labels, annotations map[string]string, ports []core.ServicePort, isSessionAffinityClientIP bool) { - Expect(f.LoadBalancer.UpdateService(labels, annotations, ports, isSessionAffinityClientIP)).NotTo(HaveOccurred()) - Eventually(f.LoadBalancer.GetServiceEndpoints).Should(Not(BeEmpty())) - ensureServiceLoadBalancer() - } - - deleteNodeBalancer := func(id int) { - Expect(getLinodeClient().DeleteNodeBalancer(context.Background(), id)).NotTo(HaveOccurred()) - } - - createNodeBalancer := func() int { - var nb *linodego.NodeBalancer - nb, err = getLinodeClient().CreateNodeBalancer(context.TODO(), linodego.NodeBalancerCreateOptions{ - Region: region, - }) - Expect(err).NotTo(HaveOccurred()) - Expect(nb).NotTo(BeNil()) - return nb.ID - } - - checkNumberOfWorkerNodes := func(numNodes int) { - Eventually(f.GetNodeList).Should(HaveLen(numNodes)) - } - - checkNumberOfUpNodes := func(numNodes int) { - By("Checking the Number of Up Nodes") - Eventually(f.LoadBalancer.GetNodeBalancerUpNodes).WithArguments(framework.TestServerResourceName).Should(BeNumerically(">=", numNodes)) - } - - checkNodeBalancerExists := func(id int) { - By("Checking if the NodeBalancer exists") - Eventually(getLinodeClient().GetNodeBalancer).WithArguments(context.Background(), id).Should(HaveField("ID", Equal(id))) - } - - checkNodeBalancerNotExists := func(id int) { - Eventually(func() int { - _, err := getLinodeClient().GetNodeBalancer(context.Background(), id) - if err == nil { - return 0 - } - linodeErr, _ := err.(*linodego.Error) - return linodeErr.Code - }).Should(Equal(404)) - } - - type checkArgs struct { - checkType, path, body, interval, timeout, attempts, checkPassive, protocol, proxyProtocol string - checkNodes bool - } - - checkNodeBalancerID := func(service string, expectedID int) { - Eventually(f.LoadBalancer.GetNodeBalancerID).WithArguments(service).Should(Equal(expectedID)) - } - - checkLBStatus := func(service string, hasIP bool) { - Eventually(f.LoadBalancer.GetNodeBalancerFromService).WithArguments(service, hasIP).Should(Not(BeNil())) - } - - checkNodeBalancerConfigForPort := func(port int, args checkArgs) { - By("Getting NodeBalancer Configuration for port " + strconv.Itoa(port)) - var nbConfig *linodego.NodeBalancerConfig - Eventually(func() error { - nbConfig, err = f.LoadBalancer.GetNodeBalancerConfigForPort(framework.TestServerResourceName, port) - return err - }).Should(BeNil()) - - if args.checkType != "" { - By("Checking Health Check Type") - Expect(string(nbConfig.Check)).To(Equal(args.checkType)) - } - - if args.path != "" { - By("Checking Health Check Path") - Expect(nbConfig.CheckPath).To(Equal(args.path)) - } - - if args.body != "" { - By("Checking Health Check Body") - Expect(nbConfig.CheckBody).To(Equal(args.body)) - } - - if args.interval != "" { - By("Checking TCP Connection Health Check Body") - intInterval, err := strconv.Atoi(args.interval) - Expect(err).NotTo(HaveOccurred()) - - Expect(nbConfig.CheckInterval).To(Equal(intInterval)) - } - - if 
args.timeout != "" { - By("Checking TCP Connection Health Check Timeout") - intTimeout, err := strconv.Atoi(args.timeout) - Expect(err).NotTo(HaveOccurred()) - - Expect(nbConfig.CheckTimeout).To(Equal(intTimeout)) - } - - if args.attempts != "" { - By("Checking TCP Connection Health Check Attempts") - intAttempts, err := strconv.Atoi(args.attempts) - Expect(err).NotTo(HaveOccurred()) - - Expect(nbConfig.CheckAttempts).To(Equal(intAttempts)) - } - - if args.checkPassive != "" { - By("Checking for Passive Health Check") - boolCheckPassive, err := strconv.ParseBool(args.checkPassive) - Expect(err).NotTo(HaveOccurred()) - - Expect(nbConfig.CheckPassive).To(Equal(boolCheckPassive)) - } - - if args.protocol != "" { - By("Checking for Protocol") - Expect(string(nbConfig.Protocol)).To(Equal(args.protocol)) - } - - if args.proxyProtocol != "" { - By("Checking for Proxy Protocol") - Expect(string(nbConfig.ProxyProtocol)).To(Equal(args.proxyProtocol)) - } - - if args.checkNodes { - checkNumberOfUpNodes(2) - } - } - - addNewNode := func() { - err := exec.Command("terraform", "apply", "-var", "nodes=3", "-auto-approve").Run() - Expect(err).NotTo(HaveOccurred()) - } - - deleteNewNode := func() { - err := exec.Command("terraform", "apply", "-var", "nodes=2", "-auto-approve").Run() - Expect(err).NotTo(HaveOccurred()) - } - - waitForNodeAddition := func() { - checkNumberOfUpNodes(3) - } - - Describe("Test", func() { - Context("Simple", func() { - Context("Load Balancer", func() { - var ( - pods []string - labels map[string]string - ) - - BeforeEach(func() { - pods = []string{"test-pod-1", "test-pod-2"} - ports := []core.ContainerPort{ - { - Name: "http-1", - ContainerPort: 8080, - }, - } - servicePorts := []core.ServicePort{ - { - Name: "http-1", - Port: 80, - TargetPort: intstr.FromInt(8080), - Protocol: "TCP", - }, - } - labels = map[string]string{ - "app": "test-loadbalancer", - } - - By("Creating Pods") - createPodWithLabel(pods, ports, framework.TestServerImage, labels, true) - - By("Creating Service") - createServiceWithSelector(labels, servicePorts, false) - }) - - AfterEach(func() { - By("Deleting the Pods") - deletePods(pods) - - By("Deleting the Service") - deleteService() - }) - - It("should reach all pods", func() { - By("Checking TCP Response") - var eps []string - Eventually(func() error { - eps, err = f.LoadBalancer.GetLoadBalancerIps() - return err - }).Should(BeNil()) - Eventually(framework.GetResponseFromCurl).WithArguments(eps[0]).Should(ContainSubstring(pods[0])) - Eventually(framework.GetResponseFromCurl).WithArguments(eps[0]).Should(ContainSubstring(pods[1])) - }) - }) - }) - }) - - Describe("Test", func() { - Context("LoadBalancer", func() { - AfterEach(func() { - err := root.Recycle() - Expect(err).NotTo(HaveOccurred()) - }) - - Context("With single TLS port", func() { - var ( - pods []string - labels map[string]string - annotations map[string]string - secretName string - ) - BeforeEach(func() { - pods = []string{"test-single-port-pod"} - ports := []core.ContainerPort{ - { - Name: "https", - ContainerPort: 8080, - }, - } - servicePorts := []core.ServicePort{ - { - Name: "https", - Port: 80, - TargetPort: intstr.FromInt(8080), - Protocol: "TCP", - }, - } - secretName = "tls-secret" - labels = map[string]string{ - "app": "test-loadbalancer", - } - annotations = map[string]string{ - annLinodePortConfigPrefix + "80": `{ "tls-secret-name": "` + secretName + `" }`, - annLinodeDefaultProtocol: "https", - } - - By("Creating Pod") - createPodWithLabel(pods, ports, framework.TestServerImage, 
labels, false) - - By("Creating Secret") - Expect(f.LoadBalancer.CreateTLSSecret("tls-secret")).NotTo(HaveOccurred()) - - By("Creating Service") - createServiceWithAnnotations(labels, annotations, servicePorts, false) - }) - - AfterEach(func() { - By("Deleting the Secrets") - deletePods(pods) - - By("Deleting the Service") - deleteService() - - By("Deleting the Secret") - deleteSecret(secretName) - }) - - It("should reach the pod via tls", func() { - By("Checking TCP Response") - var eps []string - Eventually(func() error { - eps, err = f.LoadBalancer.GetLoadBalancerIps() - return err - }).Should(BeNil()) - - By("Waiting for Response from the LoadBalancer url: " + eps[0]) - Eventually(framework.WaitForHTTPSResponse).WithArguments(eps[0]).Should(ContainSubstring(pods[0])) - }) - }) - - Context("With Hostname only ingress", func() { - var ( - pods []string - labels map[string]string - servicePorts []core.ServicePort - - annotations = map[string]string{} - ) - - BeforeEach(func() { - pods = []string{"test-pod-1"} - ports := []core.ContainerPort{ - { - Name: "http-1", - ContainerPort: 80, - }, - } - servicePorts = []core.ServicePort{ - { - Name: "http-1", - Port: 80, - TargetPort: intstr.FromInt(80), - Protocol: "TCP", - }, - } - - labels = map[string]string{ - "app": "test-loadbalancer-with-hostname-only-ingress", - } - - By("Creating Pod") - createPodWithLabel(pods, ports, framework.TestServerImage, labels, false) - - By("Creating Service") - createServiceWithAnnotations(labels, map[string]string{}, servicePorts, false) - }) - - AfterEach(func() { - By("Deleting the Pods") - deletePods(pods) - - By("Deleting the Service") - deleteService() - }) - - It("can update service to only use Hostname in ingress", func() { - By("Checking LB Status has IP") - checkLBStatus(framework.TestServerResourceName, true) - - By("Annotating service with " + annLinodeHostnameOnlyIngress) - updateServiceWithAnnotations(labels, map[string]string{ - annLinodeHostnameOnlyIngress: "true", - }, servicePorts, false) - - By("Checking LB Status does not have IP") - checkLBStatus(framework.TestServerResourceName, false) - }) - - annotations[annLinodeHostnameOnlyIngress] = "true" - - It("can create a service that only uses Hostname in ingress", func() { - By("Creating a service annotated with " + annLinodeHostnameOnlyIngress) - checkLBStatus(framework.TestServerResourceName, true) - }) - }) - - Context("With ProxyProtocol", func() { - var ( - pods []string - labels map[string]string - servicePorts []core.ServicePort - - proxyProtocolV1 = string(linodego.ProxyProtocolV1) - proxyProtocolV2 = string(linodego.ProxyProtocolV2) - proxyProtocolNone = string(linodego.ProxyProtocolNone) - ) - BeforeEach(func() { - pods = []string{"test-pod-1"} - ports := []core.ContainerPort{ - { - Name: "http-1", - ContainerPort: 80, - }, - { - Name: "http-2", - ContainerPort: 8080, - }, - } - servicePorts = []core.ServicePort{ - { - Name: "http-1", - Port: 80, - TargetPort: intstr.FromInt(80), - Protocol: "TCP", - }, - { - Name: "http-2", - Port: 8080, - TargetPort: intstr.FromInt(8080), - Protocol: "TCP", - }, - } - - labels = map[string]string{ - "app": "test-loadbalancer-with-proxyprotocol", - } - - By("Creating Pod") - createPodWithLabel(pods, ports, framework.TestServerImage, labels, false) - - By("Creating Service") - createServiceWithAnnotations(labels, map[string]string{}, servicePorts, false) - }) - - AfterEach(func() { - By("Deleting the Pods") - deletePods(pods) - - By("Deleting the Service") - deleteService() - }) - - It("can set 
proxy-protocol on each port", func() { - By("Annotating port 80 with v1 and 8080 with v2") - updateServiceWithAnnotations(labels, map[string]string{ - annLinodePortConfigPrefix + "80": fmt.Sprintf(`{"proxy-protocol": "%s"}`, proxyProtocolV1), - annLinodePortConfigPrefix + "8080": fmt.Sprintf(`{"proxy-protocol": "%s"}`, proxyProtocolV2), - }, servicePorts, false) - - By("Checking NodeBalancerConfig for port 80 should have ProxyProtocol v1") - checkNodeBalancerConfigForPort(80, checkArgs{proxyProtocol: proxyProtocolV1}) - - By("Checking NodeBalancerConfig for port 8080 should have ProxyProtocol v2") - checkNodeBalancerConfigForPort(8080, checkArgs{proxyProtocol: proxyProtocolV2}) - }) - - It("should override default proxy-protocol annotation when a port configuration is specified", func() { - By("Annotating a default version of ProxyProtocol v2 and v1 for port 8080") - updateServiceWithAnnotations(labels, map[string]string{ - annLinodeDefaultProxyProtocol: proxyProtocolV2, - annLinodePortConfigPrefix + "8080": fmt.Sprintf(`{"proxy-protocol": "%s"}`, proxyProtocolV1), - }, servicePorts, false) - - By("Checking NodeBalancerConfig for port 80 should have the default ProxyProtocol v2") - checkNodeBalancerConfigForPort(80, checkArgs{proxyProtocol: proxyProtocolV2}) - - By("Checking NodeBalancerConfig for port 8080 should have ProxyProtocol v1") - checkNodeBalancerConfigForPort(8080, checkArgs{proxyProtocol: proxyProtocolV1}) - }) - - It("port specific configuration should not effect other ports", func() { - By("Annotating ProxyProtocol v2 on port 8080") - updateServiceWithAnnotations(labels, map[string]string{ - annLinodePortConfigPrefix + "8080": fmt.Sprintf(`{"proxy-protocol": "%s"}`, proxyProtocolV2), - }, servicePorts, false) - - By("Checking NodeBalancerConfig for port 8080 should have ProxyProtocolv2") - checkNodeBalancerConfigForPort(8080, checkArgs{proxyProtocol: proxyProtocolV2}) - - By("Checking NodeBalancerConfig for port 80 should not have ProxyProtocol enabled") - checkNodeBalancerConfigForPort(80, checkArgs{proxyProtocol: proxyProtocolNone}) - }) - - It("default annotations can be used to apply ProxyProtocol to all NodeBalancerConfigs", func() { - annotations := make(map[string]string) - - By("By specifying ProxyProtocol v2 using the deprecated annotation " + annLinodeProxyProtocolDeprecated) - annotations[annLinodeProxyProtocolDeprecated] = proxyProtocolV2 - updateServiceWithAnnotations(labels, annotations, servicePorts, false) - - By("Checking NodeBalancerConfig for port 80 should have default ProxyProtocol v2") - checkNodeBalancerConfigForPort(80, checkArgs{proxyProtocol: proxyProtocolV2}) - By("Checking NodeBalancerConfig for port 8080 should have ProxyProtocol v2") - checkNodeBalancerConfigForPort(8080, checkArgs{proxyProtocol: proxyProtocolV2}) - - By("specifying ProxyProtocol v1 using the annotation " + annLinodeDefaultProtocol) - annotations[annLinodeDefaultProxyProtocol] = proxyProtocolV1 - updateServiceWithAnnotations(labels, annotations, servicePorts, false) - - By("Checking NodeBalancerConfig for port 80 should have default ProxyProtocol v1") - checkNodeBalancerConfigForPort(80, checkArgs{proxyProtocol: proxyProtocolV1}) - By("Checking NodeBalancerConfig for port 8080 should have ProxyProtocol v1") - checkNodeBalancerConfigForPort(8080, checkArgs{proxyProtocol: proxyProtocolV1}) - }) - }) - - Context("With Multiple HTTP and HTTPS Ports", func() { - var ( - pods []string - labels map[string]string - annotations map[string]string - secretName1 string - secretName2 string - 
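
The per-port annotations in these tests splice JSON payloads together with `fmt.Sprintf`. A sketch of the same values built with `encoding/json`, which avoids quoting mistakes as the payloads grow; the struct and helper are illustrative, while the JSON field names (`protocol`, `proxy-protocol`, `tls-secret-name`) are taken directly from the tests above:

```go
// Illustrative sketch, not part of the framework: render a
// "linode-loadbalancer-port-<port>" annotation value via encoding/json.
package framework

import (
	"encoding/json"
	"strconv"
)

type portConfig struct {
	Protocol      string `json:"protocol,omitempty"`
	ProxyProtocol string `json:"proxy-protocol,omitempty"`
	TLSSecretName string `json:"tls-secret-name,omitempty"`
}

// portAnnotation returns the annotation key and its JSON-encoded value.
func portAnnotation(prefix string, port int, cfg portConfig) (key, value string, err error) {
	raw, err := json.Marshal(cfg)
	if err != nil {
		return "", "", err
	}
	return prefix + strconv.Itoa(port), string(raw), nil
}
```

For example, `portAnnotation(annLinodePortConfigPrefix, 8080, portConfig{ProxyProtocol: "v2"})` yields the same value the test writes by hand.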
) - BeforeEach(func() { - pods = []string{"tls-multi-port-pod"} - secretName1 = "tls-secret-1" - secretName2 = "tls-secret-2" - labels = map[string]string{ - "app": "test-loadbalancer", - } - annotations = map[string]string{ - annLinodeDefaultProtocol: "https", - annLinodePortConfigPrefix + "80": `{"protocol": "http"}`, - annLinodePortConfigPrefix + "8080": `{"protocol": "http"}`, - annLinodePortConfigPrefix + "443": `{"tls-secret-name": "` + secretName1 + `"}`, - annLinodePortConfigPrefix + "8443": `{"tls-secret-name": "` + secretName2 + `", "protocol": "https"}`, - } - ports := []core.ContainerPort{ - { - Name: "alpha", - ContainerPort: 8080, - }, - { - Name: "beta", - ContainerPort: 8989, - }, - } - servicePorts := []core.ServicePort{ - { - Name: "http-1", - Port: 80, - TargetPort: intstr.FromInt(8989), - Protocol: "TCP", - }, - { - Name: "http-2", - Port: 8080, - TargetPort: intstr.FromInt(8080), - Protocol: "TCP", - }, - { - Name: "https-1", - Port: 443, - TargetPort: intstr.FromInt(8080), - Protocol: "TCP", - }, - { - Name: "https-2", - Port: 8443, - TargetPort: intstr.FromInt(8989), - Protocol: "TCP", - }, - } - - By("Creating Pod") - createPodWithLabel(pods, ports, framework.TestServerImage, labels, false) - - By("Creating Secret") - err = f.LoadBalancer.CreateTLSSecret(secretName1) - Expect(err).NotTo(HaveOccurred()) - err = f.LoadBalancer.CreateTLSSecret(secretName2) - Expect(err).NotTo(HaveOccurred()) - - By("Creating Service") - createServiceWithAnnotations(labels, annotations, servicePorts, false) - }) - - AfterEach(func() { - By("Deleting the Secrets") - deletePods(pods) - - By("Deleting the Service") - deleteService() - - By("Deleting the Secret") - deleteSecret(secretName1) - deleteSecret(secretName2) - }) - - It("should reach the pods", func() { - By("Checking TCP Response") - var eps []string - Eventually(func() error { - eps, err = f.LoadBalancer.GetLoadBalancerIps() - return err - }).Should(BeNil()) - Expect(eps).Should(HaveLen(4)) - - // in order of the spec - http80, http8080, https443, https8443 := eps[0], eps[1], eps[2], eps[3] - Eventually(framework.WaitForHTTPResponse).WithArguments(http80).Should(ContainSubstring(pods[0])) - Eventually(framework.WaitForHTTPResponse).WithArguments(http8080).Should(ContainSubstring(pods[0])) - Eventually(framework.WaitForHTTPSResponse).WithArguments(https443).Should(ContainSubstring(pods[0])) - Eventually(framework.WaitForHTTPSResponse).WithArguments(https8443).Should(ContainSubstring(pods[0])) - }) - }) - - Context("With HTTP updating to have HTTPS", Serial, func() { - var ( - pods []string - labels map[string]string - annotations map[string]string - secretName string - ) - BeforeEach(func() { - pods = []string{"tls-pod"} - secretName = "tls-secret-1" - labels = map[string]string{ - "app": "test-loadbalancer", - } - annotations = map[string]string{ - annLinodeDefaultProtocol: "https", - annLinodePortConfigPrefix + "80": `{"protocol": "http"}`, - } - ports := []core.ContainerPort{ - { - Name: "alpha", - ContainerPort: 8080, - }, - } - servicePorts := []core.ServicePort{ - { - Name: "http", - Port: 80, - TargetPort: intstr.FromInt(8080), - Protocol: "TCP", - }, - } - - By("Creating Pod") - createPodWithLabel(pods, ports, framework.TestServerImage, labels, false) - - By("Creating Service") - createServiceWithAnnotations(labels, annotations, servicePorts, false) - - By("Creating Secret") - err = f.LoadBalancer.CreateTLSSecret(secretName) - Expect(err).NotTo(HaveOccurred()) - - By("Updating the Service") - updateAnnotations := 
map[string]string{ - annLinodeDefaultProtocol: "https", - annLinodePortConfigPrefix + "80": `{"protocol": "http"}`, - annLinodePortConfigPrefix + "443": `{"tls-secret-name": "` + secretName + `", "protocol": "https"}`, - } - updateServicePorts := []core.ServicePort{ - { - Name: "http", - Port: 80, - TargetPort: intstr.FromInt(8080), - Protocol: "TCP", - }, - { - Name: "https", - Port: 443, - TargetPort: intstr.FromInt(8080), - Protocol: "TCP", - }, - } - updateServiceWithAnnotations(labels, updateAnnotations, updateServicePorts, false) - }) - - AfterEach(func() { - By("Deleting the Secrets") - deletePods(pods) - - By("Deleting the Service") - deleteService() - - By("Deleting the Secret") - deleteSecret(secretName) - }) - - It("should reach the pods", func() { - By("Checking TCP Response") - var eps []string - Eventually(func() error { - eps, err = f.LoadBalancer.GetLoadBalancerIps() - return err - }).Should(BeNil()) - Expect(eps).Should(HaveLen(2)) - http80, https443 := eps[0], eps[1] - By("Waiting for Response from the LoadBalancer url: " + http80) - Eventually(framework.WaitForHTTPResponse).WithArguments(http80).Should(ContainSubstring(pods[0])) - - By("Waiting for Response from the LoadBalancer url: " + https443) - Eventually(framework.WaitForHTTPSResponse).WithArguments(https443).Should(ContainSubstring(pods[0])) - }) - }) - - Context("For HTTP body health check", func() { - var ( - pods []string - labels map[string]string - annotations map[string]string - - checkType = "http_body" - path = "/" - body = "nginx" - protocol = "http" - ) - BeforeEach(func() { - pods = []string{"test-pod-http-body"} - ports := []core.ContainerPort{ - { - Name: "http", - ContainerPort: 80, - }, - } - servicePorts := []core.ServicePort{ - { - Name: "http", - Port: 80, - TargetPort: intstr.FromInt(80), - Protocol: "TCP", - }, - } - - labels = map[string]string{ - "app": "test-loadbalancer", - } - annotations = map[string]string{ - annLinodeHealthCheckType: checkType, - annLinodeCheckPath: path, - annLinodeCheckBody: body, - annLinodeDefaultProtocol: protocol, - } - - By("Creating Pod") - createPodWithLabel(pods, ports, "nginx", labels, false) - - By("Creating Service") - createServiceWithAnnotations(labels, annotations, servicePorts, false) - }) - - AfterEach(func() { - By("Deleting the Pods") - deletePods(pods) - - By("Deleting the Service") - deleteService() - }) - - It("should successfully check the health of 2 nodes", func() { - By("Checking NodeBalancer Configurations") - checkNodeBalancerConfigForPort(80, checkArgs{ - checkType: checkType, - path: path, - body: body, - protocol: protocol, - checkNodes: true, - }) - }) - }) - - Context("Updated with NodeBalancerID", func() { - var ( - pods []string - labels map[string]string - servicePorts []core.ServicePort - - annotations = map[string]string{} - ) - BeforeEach(func() { - pods = []string{"test-pod-1"} - ports := []core.ContainerPort{ - { - Name: "http-1", - ContainerPort: 8080, - }, - } - servicePorts = []core.ServicePort{ - { - Name: "http-1", - Port: 80, - TargetPort: intstr.FromInt(8080), - Protocol: "TCP", - }, - } - - labels = map[string]string{ - "app": "test-loadbalancer-with-nodebalancer-id", - } - - By("Creating Pod") - createPodWithLabel(pods, ports, framework.TestServerImage, labels, false) - - By("Creating Service") - createServiceWithAnnotations(labels, annotations, servicePorts, false) - }) - - AfterEach(func() { - By("Deleting the Pods") - deletePods(pods) - - By("Deleting the Service") - deleteService() - }) - - It("should use the 
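
Every `updateServiceWithAnnotations` call in these specs rewrites the Service in place, and the write only succeeds if it carries the live object's metadata. A condensed sketch of the conflict-safe pattern the framework's `createOrUpdateService` (shown later in this diff) uses, assuming a client-go `kubernetes.Interface`:

```go
// Condensed sketch of the update pattern the tests rely on: re-read the
// Service, carry over ResourceVersion and ClusterIP, retry on conflicts.
package framework

import (
	"context"

	core "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/util/retry"
)

func updateServiceInPlace(ctx context.Context, kube kubernetes.Interface, ns string, desired *core.Service) error {
	services := kube.CoreV1().Services(ns)
	return retry.RetryOnConflict(retry.DefaultRetry, func() error {
		current, err := services.Get(ctx, desired.Name, metav1.GetOptions{})
		if err != nil {
			return err
		}
		// ResourceVersion must match the live object or the API server
		// rejects the write; ClusterIP is immutable and must be preserved.
		desired.ResourceVersion = current.ResourceVersion
		desired.Spec.ClusterIP = current.Spec.ClusterIP
		_, err = services.Update(ctx, desired, metav1.UpdateOptions{})
		return err
	})
}
```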
specified NodeBalancer", func() { - By("Creating new NodeBalancer") - nbID := createNodeBalancer() - - By("Annotating service with new NodeBalancer ID") - annotations[annLinodeNodeBalancerID] = strconv.Itoa(nbID) - updateServiceWithAnnotations(labels, annotations, servicePorts, false) - - By("Checking the NodeBalancer ID") - checkNodeBalancerID(framework.TestServerResourceName, nbID) - }) - }) - - Context("Created with NodeBalancerID", func() { - var ( - pods []string - labels map[string]string - annotations map[string]string - servicePorts []core.ServicePort - - nodeBalancerID int - ) - - BeforeEach(func() { - pods = []string{"test-pod-1"} - ports := []core.ContainerPort{ - { - Name: "http-1", - ContainerPort: 8080, - }, - } - servicePorts = []core.ServicePort{ - { - Name: "http-1", - Port: 80, - TargetPort: intstr.FromInt(8080), - Protocol: "TCP", - }, - } - - labels = map[string]string{ - "app": "test-loadbalancer-with-nodebalancer-id", - } - - By("Creating NodeBalancer") - nodeBalancerID = createNodeBalancer() - - annotations = map[string]string{ - annLinodeNodeBalancerID: strconv.Itoa(nodeBalancerID), - } - - By("Creating Pod") - createPodWithLabel(pods, ports, framework.TestServerImage, labels, false) - - By("Creating Service") - createServiceWithAnnotations(labels, annotations, servicePorts, false) - }) - - AfterEach(func() { - By("Deleting the Pods") - deletePods(pods) - - By("Deleting the Service") - deleteService() - - err := root.Recycle() - Expect(err).NotTo(HaveOccurred()) - }) - - It("should use the specified NodeBalancer", func() { - By("Checking the NodeBalancerID") - checkNodeBalancerID(framework.TestServerResourceName, nodeBalancerID) - }) - - It("should use the newly specified NodeBalancer ID", func() { - By("Creating new NodeBalancer") - nbID := createNodeBalancer() - - By("Waiting for current NodeBalancer to be ready") - checkNodeBalancerID(framework.TestServerResourceName, nodeBalancerID) - - By("Annotating service with new NodeBalancer ID") - annotations[annLinodeNodeBalancerID] = strconv.Itoa(nbID) - updateServiceWithAnnotations(labels, annotations, servicePorts, false) - - By("Checking the NodeBalancer ID") - checkNodeBalancerID(framework.TestServerResourceName, nbID) - - By("Checking old NodeBalancer was deleted") - checkNodeBalancerNotExists(nodeBalancerID) - }) - }) - - Context("Deleted Service when NodeBalancer not present", func() { - var ( - pods []string - labels map[string]string - annotations map[string]string - servicePorts []core.ServicePort - - nodeBalancerID int - ) - - BeforeEach(func() { - pods = []string{"test-pod-1"} - ports := []core.ContainerPort{ - { - Name: "http-1", - ContainerPort: 8080, - }, - } - servicePorts = []core.ServicePort{ - { - Name: "http-1", - Port: 80, - TargetPort: intstr.FromInt(8080), - Protocol: "TCP", - }, - } - - labels = map[string]string{ - "app": "test-loadbalancer-with-nodebalancer-id", - } - - By("Creating NodeBalancer") - nodeBalancerID = createNodeBalancer() - - annotations = map[string]string{ - annLinodeNodeBalancerID: strconv.Itoa(nodeBalancerID), - } - - By("Creating Pod") - createPodWithLabel(pods, ports, framework.TestServerImage, labels, false) - - By("Creating Service") - createServiceWithAnnotations(labels, annotations, servicePorts, false) - }) - - AfterEach(func() { - By("Deleting the Pods") - deletePods(pods) - - err := root.Recycle() - Expect(err).NotTo(HaveOccurred()) - }) - - It("should use the specified NodeBalancer", func() { - By("Checking the NodeBalancerID") - 
checkNodeBalancerID(framework.TestServerResourceName, nodeBalancerID) - }) - - It("should use the newly specified NodeBalancer ID", func() { - By("Creating new NodeBalancer") - nbID := createNodeBalancer() - - By("Waiting for current NodeBalancer to be ready") - checkNodeBalancerID(framework.TestServerResourceName, nodeBalancerID) - - By("Annotating service with new NodeBalancer ID") - annotations[annLinodeNodeBalancerID] = strconv.Itoa(nbID) - updateServiceWithAnnotations(labels, annotations, servicePorts, false) - - By("Checking the NodeBalancer ID") - checkNodeBalancerID(framework.TestServerResourceName, nbID) - - By("Checking old NodeBalancer was deleted") - checkNodeBalancerNotExists(nodeBalancerID) - }) - - It("should delete the service with no NodeBalancer present", func() { - By("Deleting the NodeBalancer") - deleteNodeBalancer(nodeBalancerID) - - By("Checking old NodeBalancer was deleted") - checkNodeBalancerNotExists(nodeBalancerID) - - By("Deleting the Service") - deleteService() - - By("Checking if the service was deleted") - ensureServiceWasDeleted() - }) - }) - - Context("With Preserve Annotation", func() { - var ( - pods []string - servicePorts []core.ServicePort - labels map[string]string - annotations map[string]string - nodeBalancerID int - ) - - BeforeEach(func() { - pods = []string{"test-pod-1"} - ports := []core.ContainerPort{ - { - Name: "http-1", - ContainerPort: 8080, - }, - } - servicePorts = []core.ServicePort{ - { - Name: "http-1", - Port: 80, - TargetPort: intstr.FromInt(8080), - Protocol: "TCP", - }, - } - - labels = map[string]string{ - "app": "test-loadbalancer", - } - annotations = map[string]string{ - annLinodeLoadBalancerPreserve: "true", - } - - By("Creating Pod") - createPodWithLabel(pods, ports, framework.TestServerImage, labels, false) - - By("Creating Service") - createServiceWithAnnotations(labels, annotations, servicePorts, false) - - By("Getting NodeBalancer ID") - nodeBalancerID, err = f.LoadBalancer.GetNodeBalancerID(framework.TestServerResourceName) - Expect(err).NotTo(HaveOccurred()) - }) - - AfterEach(func() { - By("Deleting the NodeBalancer") - deleteNodeBalancer(nodeBalancerID) - - err := root.Recycle() - Expect(err).NotTo(HaveOccurred()) - }) - - It("should preserve the underlying nodebalancer after service deletion", func() { - By("Deleting the Pods") - deletePods(pods) - - By("Deleting the Service") - deleteService() - - By("Checking if the NodeBalancer exists") - checkNodeBalancerExists(nodeBalancerID) - }) - - It("should preserve the underlying nodebalancer after a new one is specified", func() { - defer func() { - By("Deleting the Pods") - deletePods(pods) - - By("Deleting the Service") - deleteService() - }() - - By("Creating new NodeBalancer") - newID := createNodeBalancer() - defer func() { - By("Deleting new NodeBalancer") - deleteNodeBalancer(newID) - }() - - By("Annotating service with new NodeBalancer ID") - annotations[annLinodeNodeBalancerID] = strconv.Itoa(newID) - updateServiceWithAnnotations(labels, annotations, servicePorts, false) - - By("Checking the service's NodeBalancer ID") - checkNodeBalancerID(framework.TestServerResourceName, newID) - - By("Checking the old NodeBalancer exists") - checkNodeBalancerExists(nodeBalancerID) - }) - }) - - Context("With Node Addition", func() { - var ( - pods []string - labels map[string]string - ) - - BeforeEach(func() { - Skip("skip until rewritten to drop terraform") - pods = []string{"test-pod-node-add"} - ports := []core.ContainerPort{ - { - Name: "http-1", - ContainerPort: 8080, - 
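
The preserve-annotation specs hinge on `checkNodeBalancerExists` / `checkNodeBalancerNotExists` distinguishing "gone" from "API failure". A sketch of that probe, assuming linodego surfaces API errors as its typed `*linodego.Error`:

```go
// Sketch of an existence probe: a 404 from the Linode API means the
// NodeBalancer was deleted; any other error is a genuine failure.
package framework

import (
	"context"
	"errors"
	"net/http"

	"github.com/linode/linodego"
)

// nodeBalancerExists returns (false, nil) only when the API reports 404.
func nodeBalancerExists(ctx context.Context, client *linodego.Client, id int) (bool, error) {
	_, err := client.GetNodeBalancer(ctx, id)
	if err == nil {
		return true, nil
	}
	var apiErr *linodego.Error
	if errors.As(err, &apiErr) && apiErr.Code == http.StatusNotFound {
		return false, nil
	}
	return false, err
}
```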
}, - } - servicePorts := []core.ServicePort{ - { - Name: "http-1", - Port: 80, - TargetPort: intstr.FromInt(8080), - Protocol: "TCP", - }, - } - labels = map[string]string{ - "app": "test-loadbalancer", - } - - By("Creating Pods") - createPodWithLabel(pods, ports, framework.TestServerImage, labels, false) - - By("Creating Service") - createServiceWithSelector(labels, servicePorts, false) - }) - - AfterEach(func() { - By("Deleting the Pods") - deletePods(pods) - - By("Deleting the Service") - deleteService() - - By("Deleting the Newly Created Nodes") - deleteNewNode() - - By("Waiting for the Node to be removed") - checkNumberOfWorkerNodes(2) - }) - - It("should reach the same pod every time it requests", func() { - By("Adding a New Node") - addNewNode() - - By("Waiting for the Node to be Added to the NodeBalancer") - waitForNodeAddition() - }) - }) - - Context("For TCP Connection health check", func() { - var ( - pods []string - labels map[string]string - annotations map[string]string - - checkType = "connection" - interval = "10" - timeout = "5" - attempts = "4" - protocol = "tcp" - ) - BeforeEach(func() { - pods = []string{"test-pod-tcp"} - ports := []core.ContainerPort{ - { - Name: "http", - ContainerPort: 80, - }, - } - servicePorts := []core.ServicePort{ - { - Name: "http", - Port: 80, - TargetPort: intstr.FromInt(80), - Protocol: "TCP", - }, - } - - labels = map[string]string{ - "app": "test-loadbalancer", - } - annotations = map[string]string{ - annLinodeHealthCheckType: checkType, - annLinodeDefaultProtocol: protocol, - annLinodeHealthCheckInterval: interval, - annLinodeHealthCheckTimeout: timeout, - annLinodeHealthCheckAttempts: attempts, - } - - By("Creating Pod") - createPodWithLabel(pods, ports, "nginx", labels, false) - - By("Creating Service") - createServiceWithAnnotations(labels, annotations, servicePorts, false) - }) - - AfterEach(func() { - By("Deleting the Pods") - deletePods(pods) - - By("Deleting the Service") - deleteService() - }) - - It("should successfully check the health of 2 nodes", func() { - By("Checking NodeBalancer Configurations") - checkNodeBalancerConfigForPort(80, checkArgs{ - checkType: checkType, - interval: interval, - timeout: timeout, - attempts: attempts, - protocol: protocol, - checkNodes: true, - }) - }) - }) - - Context("For Passive Health Check", func() { - var ( - pods []string - labels map[string]string - annotations map[string]string - - checkType = "none" - checkPassive = "true" - ) - BeforeEach(func() { - pods = []string{"test-pod-passive-hc"} - ports := []core.ContainerPort{ - { - Name: "http", - ContainerPort: 80, - }, - } - servicePorts := []core.ServicePort{ - { - Name: "http", - Port: 80, - TargetPort: intstr.FromInt(80), - Protocol: "TCP", - }, - } - - labels = map[string]string{ - "app": "test-loadbalancer", - } - annotations = map[string]string{ - annLinodeHealthCheckPassive: checkPassive, - annLinodeHealthCheckType: checkType, - } - - By("Creating Pod") - createPodWithLabel(pods, ports, "nginx", labels, false) - - By("Creating Service") - createServiceWithAnnotations(labels, annotations, servicePorts, false) - }) - - AfterEach(func() { - By("Deleting the Pods") - deletePods(pods) - - By("Deleting the Service") - deleteService() - }) - - It("should successfully check the health of 2 nodes", func() { - By("Checking NodeBalancer Configurations") - checkNodeBalancerConfigForPort(80, checkArgs{ - checkType: checkType, - checkPassive: checkPassive, - checkNodes: true, - }) - }) - }) - - Context("For HTTP Status Health Check", func() { - 
var ( - pods []string - labels map[string]string - annotations map[string]string - - checkType = "http" - path = "/" - ) - BeforeEach(func() { - pods = []string{"test-pod-http-status"} - ports := []core.ContainerPort{ - { - Name: "http", - ContainerPort: 80, - }, - } - servicePorts := []core.ServicePort{ - { - Name: "http", - Port: 80, - TargetPort: intstr.FromInt(80), - Protocol: "TCP", - }, - } - - labels = map[string]string{ - "app": "test-loadbalancer", - } - annotations = map[string]string{ - annLinodeHealthCheckType: checkType, - annLinodeCheckPath: path, - annLinodeDefaultProtocol: "http", - } - - By("Creating Pod") - createPodWithLabel(pods, ports, "nginx", labels, false) - - By("Creating Service") - createServiceWithAnnotations(labels, annotations, servicePorts, false) - }) - - AfterEach(func() { - By("Deleting the Pods") - deletePods(pods) - - By("Deleting the Service") - deleteService() - }) - - It("should successfully check the health of 2 nodes", func() { - By("Checking NodeBalancer Configurations") - checkNodeBalancerConfigForPort(80, checkArgs{ - checkType: checkType, - path: path, - checkNodes: true, - }) - }) - }) - }) - }) -}) diff --git a/e2e/test/ccm_suite_test.go b/e2e/test/ccm_suite_test.go deleted file mode 100644 index 8f5c9ca8..00000000 --- a/e2e/test/ccm_suite_test.go +++ /dev/null @@ -1,113 +0,0 @@ -package test - -import ( - "e2e_test/test/framework" - "flag" - "os" - "path/filepath" - "testing" - "time" - - "github.com/linode/linodego" - - "github.com/appscode/go/crypto/rand" - "k8s.io/client-go/kubernetes" - "k8s.io/client-go/tools/clientcmd" - - . "github.com/onsi/ginkgo/v2" - . "github.com/onsi/gomega" -) - -var ( - useExisting = false - reuse = false - clusterName string - region = "us-east" - k8s_version string - linodeURL = "https://api.linode.com" -) - -func init() { - flag.StringVar(&framework.Image, "image", framework.Image, "registry/repository:tag") - flag.StringVar(&framework.ApiToken, "api-token", os.Getenv("LINODE_API_TOKEN"), "linode api token") - flag.BoolVar(&reuse, "reuse", reuse, "Create a cluster and continue to use it") - flag.BoolVar(&useExisting, "use-existing", useExisting, "Use an existing kubernetes cluster") - flag.StringVar(&framework.KubeConfigFile, "kubeconfig", os.Getenv("TEST_KUBECONFIG"), "To use existing cluster provide kubeconfig file") - flag.StringVar(®ion, "region", region, "Region to create load balancers") - flag.StringVar(&k8s_version, "k8s_version", k8s_version, "k8s_version for child cluster") - flag.DurationVar(&framework.Timeout, "timeout", 5*time.Minute, "Timeout for a test to complete successfully") - flag.StringVar(&linodeURL, "linode-url", linodeURL, "The Linode API URL to send requests to") -} - -const ( - TIMEOUT = 5 * time.Minute -) - -var root *framework.Framework - -func TestE2e(t *testing.T) { - RegisterFailHandler(Fail) - SetDefaultEventuallyTimeout(framework.Timeout) - RunSpecs(t, "e2e Suite") -} - -var getLinodeClient = func() *linodego.Client { - linodeClient := linodego.NewClient(nil) - linodeClient.SetToken(framework.ApiToken) - linodeClient.SetBaseURL(linodeURL) - return &linodeClient -} - -var _ = BeforeSuite(func() { - if reuse { - clusterName = "ccm-linode-for-reuse" - } else { - clusterName = rand.WithUniqSuffix("ccm-linode") - } - - dir, err := os.Getwd() - Expect(err).NotTo(HaveOccurred()) - kubeConfigFile := filepath.Join(dir, clusterName+".conf") - - if reuse { - if _, err := os.Stat(kubeConfigFile); !os.IsNotExist(err) { - useExisting = true - framework.KubeConfigFile = kubeConfigFile - } - 
} - - if !useExisting { - err := framework.CreateCluster(clusterName, region, k8s_version) - Expect(err).NotTo(HaveOccurred()) - framework.KubeConfigFile = kubeConfigFile - } - - By("Using kubeconfig from " + framework.KubeConfigFile) - config, err := clientcmd.BuildConfigFromFlags("", framework.KubeConfigFile) - Expect(err).NotTo(HaveOccurred()) - - // Clients - kubeClient := kubernetes.NewForConfigOrDie(config) - linodeClient := getLinodeClient() - - // Framework - root = framework.New(config, kubeClient, *linodeClient) - - By("Using Namespace " + root.Namespace()) - err = root.CreateNamespace() - Expect(err).NotTo(HaveOccurred()) -}) - -var _ = AfterSuite(func() { - if !(useExisting || reuse) { - By("Deleting cluster") - err := framework.DeleteCluster(clusterName) - Expect(err).NotTo(HaveOccurred()) - } else { - By("Deleting Namespace " + root.Namespace()) - err := root.DeleteNamespace() - Expect(err).NotTo(HaveOccurred()) - - By("Not deleting cluster") - } -}) diff --git a/e2e/test/certificates/server.crt b/e2e/test/certificates/server.crt index 3b2c51b8..ccb2eaf7 100644 --- a/e2e/test/certificates/server.crt +++ b/e2e/test/certificates/server.crt @@ -1,32 +1,33 @@ -----BEGIN CERTIFICATE----- -MIIFcjCCA1qgAwIBAgIJAM2X8k3I/mF6MA0GCSqGSIb3DQEBCwUAMFIxCzAJBgNV -BAYTAkFVMRMwEQYDVQQIDApTb21lLVN0YXRlMSEwHwYDVQQKDBhJbnRlcm5ldCBX -aWRnaXRzIFB0eSBMdGQxCzAJBgNVBAMMAmNhMB4XDTIwMTEwOTE3NDU1NFoXDTI0 -MTAxOTE3NDU1NFowcjELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAlBBMRUwEwYDVQQH -DAxQaGlsYWRlbHBoaWExEzARBgNVBAoMCkxpbm9kZSBMTEMxFDASBgNVBAsMC0xp -bm9kZSBUZXN0MRQwEgYDVQQDDAtsaW5vZGUudGVzdDCCAiIwDQYJKoZIhvcNAQEB -BQADggIPADCCAgoCggIBANuycV24k/tGqbUrxNrhWpCUH/qsE+ZhhQjWkBUQQ3cc -S01BelMoJWX5U/SnVbSfZJrMmY/b4RZ8bM1p6FCtWP67pzmE3yZTQVadvHzuNx61 -LWhxQmZ3L7tAuE3XNPe9tadqT+Z4YF7/AbWXgwFWP4Y0iBoD9dVjS0lj9nmZ+Pqu -Ia+4lKlai0WFc+XuPPW9bBF6Qokg1rquhlkc7xAarh19BfNplvOAzH2tk63BuG8N -ItK+JylnIAd2re7Ed9g7xDxbBYhK3hYDbMe29SnBvp4MwN2Si/W74kCki445+PFT -6Ff8OZU9z2BiAI8LtrWzWPCenVHJ4uGh5yMIcARVW+dvfXiBAp3HLnlRMcPC7VwW -DlCk+APHaCst2QauAflmPDdEIROh/Lbi6wJeiFIfK3brryXN/PkCAw+iCq+fM7nr -LeqtP5Tb467jn1DGvhLAT1+/ecMg7qi+LVMn8kslMmDV3EOIqdpxUwWF6Ae+Yy7F -x1C4TgvuwG8c1njO+po3jYCs1CWs/tRFDC2DkrH4mBTJVn6Ii4pX+1jHUd2yx3HB -QAP0INwmRE1PwdtGpmAyP/tE72NbXdNYM6FMd1IwzwepqZ7YKw6EyEgzQL2YDzrf -Xm7OwBDOv4h4CELQJwkFC080OPIt1mZaT2G9iHljrDQl4jmTzfkk13nS+TNyFMFH -AgMBAAGjKzApMCcGA1UdEQQgMB6CC2xpbm9kZS50ZXN0gg93d3cubGlub2RlLnRl -c3QwDQYJKoZIhvcNAQELBQADggIBAEzwYvkEfTZJYQQTeLGdc9cjnDfhN1qYzgPY -eEYmU0V6bsKEqnJwZWl8vGVoOYY7Coe46lNTWMlYjNk3Of/qRCHiAiblGEUoMG24 -79tEaseLE28P4UDzLPQeO+z5JOdVkyKs++Q6o1Or9oQIFdtbRIKXwATEUgJSiMq7 -fyoDlWxFWNQ1bTJe6l+0rbZpIHpxbLnIimZUzgrYVlXHnPS2TSSLPo9vbxvvLFTJ -T6CF5fgdxkLq8NVbTHni/YjBMzk4q2D0WaoPO1Oeq+fAts+OrPDZMfXWmp4s7nPw -63cptWnY+5F30xdQFeXOSphyn2MZy6kZE0P4mU9rQaMyzOA2mA3QJTUhm2iB5K6+ -kzv9ydXuX2j0X9dapAAKrxJtGuzbkpvmsL3G3x0rfS9TrKhugu5x+EvlsZcx92Xr -rFSF2tokIg+T/Rev3YNdrq0wHLYJTjAvS5+HWhGCNN9gdaGqkpx1g7BWExl8xgeo -nsLWeEMyQfEOrZyEloTmzAW9qN1qilmR2keQpzvS1PuhMnNxh+eBQyRfeOatwyPQ -ofGA0ZvUwXnAQH/+G1MmYOKkxDCKi+5oMvhaNenQjj2KSwftgQUhSeCV9aAQu9Fa -WpBrglz+S4SPWetrRGfnrxBznReY507ZSdzzMhZEXb+YPWjLd/bFPIiniD+U4a5Z -7gxpALyz +MIIFvTCCA6WgAwIBAgIUBpS47ArkUC0MXYK3LvXU3eRh/CowDQYJKoZIhvcNAQEL +BQAwUjELMAkGA1UEBhMCQVUxEzARBgNVBAgMClNvbWUtU3RhdGUxITAfBgNVBAoM +GEludGVybmV0IFdpZGdpdHMgUHR5IEx0ZDELMAkGA1UEAwwCY2EwHhcNMjQxMTE1 +MTcxNjI1WhcNMjcxMTE1MTcxNjI1WjByMQswCQYDVQQGEwJVUzELMAkGA1UECAwC +UEExFTATBgNVBAcMDFBoaWxhZGVscGhpYTETMBEGA1UECgwKTGlub2RlIExMQzEU +MBIGA1UECwwLTGlub2RlIFRlc3QxFDASBgNVBAMMC2xpbm9kZS50ZXN0MIICIjAN 
+BgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEA27JxXbiT+0aptSvE2uFakJQf+qwT +5mGFCNaQFRBDdxxLTUF6UyglZflT9KdVtJ9kmsyZj9vhFnxszWnoUK1Y/runOYTf +JlNBVp28fO43HrUtaHFCZncvu0C4Tdc09721p2pP5nhgXv8BtZeDAVY/hjSIGgP1 +1WNLSWP2eZn4+q4hr7iUqVqLRYVz5e489b1sEXpCiSDWuq6GWRzvEBquHX0F82mW +84DMfa2TrcG4bw0i0r4nKWcgB3at7sR32DvEPFsFiEreFgNsx7b1KcG+ngzA3ZKL +9bviQKSLjjn48VPoV/w5lT3PYGIAjwu2tbNY8J6dUcni4aHnIwhwBFVb5299eIEC +nccueVExw8LtXBYOUKT4A8doKy3ZBq4B+WY8N0QhE6H8tuLrAl6IUh8rduuvJc38 ++QIDD6IKr58zuest6q0/lNvjruOfUMa+EsBPX795wyDuqL4tUyfySyUyYNXcQ4ip +2nFTBYXoB75jLsXHULhOC+7AbxzWeM76mjeNgKzUJaz+1EUMLYOSsfiYFMlWfoiL +ilf7WMdR3bLHccFAA/Qg3CZETU/B20amYDI/+0TvY1td01gzoUx3UjDPB6mpntgr +DoTISDNAvZgPOt9ebs7AEM6/iHgIQtAnCQULTzQ48i3WZlpPYb2IeWOsNCXiOZPN ++STXedL5M3IUwUcCAwEAAaNrMGkwJwYDVR0RBCAwHoILbGlub2RlLnRlc3SCD3d3 +dy5saW5vZGUudGVzdDAdBgNVHQ4EFgQUgNqzhL/JpxllvFu18qvlg/usDrEwHwYD +VR0jBBgwFoAUC2AMOf90/zpuQ588rPLfe7EukIUwDQYJKoZIhvcNAQELBQADggIB +AL38v8A0Yfi3Qcr7JtMJ+EOgiHo+W1PW05CAKrswqIZGb9pLwcc46N1ICX4/wItH +DfOmiLHEJ+eEaf07XWy1G+orvqsz6FLh2lfr1cne2DH1udiBXw2VyHDeaighgqTX +rHPcV9lLPcRgQgE8AC2WSn3Rmjd4eU+twlqYcJTLt3cy+TulwXxGBjn7CSmRamRA +AaURnVpsMhw9baINrN6+3zbjw1LKpMO3JfPx9NPw0iUYYbUWFMli2RTEwdR0o9Fu +Om6ogyYHHLTUDv2+cHYY4TKJ0LGz9PGB3iwdGbSSpLadjV7xkFERio5B4o/FedLB +CuECSIoWqjScSrVWjpIpG6b7LVkuDI7ZrZ6Rvkwcv4Zezx5TkynQUw9EezEgGRQf +RiBSKoPGKJfRGiYGNXDjqENX3kxqt5cuVe/Z0czrb+2zOMfaTZwJtp2rrJqckxBh +CK4CXQz2nsfGRW/lyJ1Jyc+ul0obXXhynDBA9dE5woCIwgTCRL9M0ZOHjoQi1tDh +27i0j4YzIvlIDIi6iex/XVZi9mhuRvDR7f7c5RVpHsu38znCLyQetFnwOQOmIVZI +lEUQvU1Jnk+e5+RqvOcZ0ZcLppBa71XjUdYm56mzY1ph04n1VUO4rmaI3wNBETGd +jJ3K7XuBBL/YT+02AzsZR/0fiHLdA9DbLUdhtRs0mb5u -----END CERTIFICATE----- diff --git a/e2e/test/certificates/server.csr b/e2e/test/certificates/server.csr index b869ff96..db02c077 100644 --- a/e2e/test/certificates/server.csr +++ b/e2e/test/certificates/server.csr @@ -1,5 +1,5 @@ -----BEGIN CERTIFICATE REQUEST----- -MIIE8TCCAtkCAQAwcjELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAlBBMRUwEwYDVQQH +MIIEtzCCAp8CAQAwcjELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAlBBMRUwEwYDVQQH DAxQaGlsYWRlbHBoaWExEzARBgNVBAoMCkxpbm9kZSBMTEMxFDASBgNVBAsMC0xp bm9kZSBUZXN0MRQwEgYDVQQDDAtsaW5vZGUudGVzdDCCAiIwDQYJKoZIhvcNAQEB BQADggIPADCCAgoCggIBANuycV24k/tGqbUrxNrhWpCUH/qsE+ZhhQjWkBUQQ3cc @@ -13,17 +13,16 @@ LeqtP5Tb467jn1DGvhLAT1+/ecMg7qi+LVMn8kslMmDV3EOIqdpxUwWF6Ae+Yy7F x1C4TgvuwG8c1njO+po3jYCs1CWs/tRFDC2DkrH4mBTJVn6Ii4pX+1jHUd2yx3HB QAP0INwmRE1PwdtGpmAyP/tE72NbXdNYM6FMd1IwzwepqZ7YKw6EyEgzQL2YDzrf Xm7OwBDOv4h4CELQJwkFC080OPIt1mZaT2G9iHljrDQl4jmTzfkk13nS+TNyFMFH -AgMBAAGgOjA4BgkqhkiG9w0BCQ4xKzApMCcGA1UdEQQgMB6CC2xpbm9kZS50ZXN0 -gg93d3cubGlub2RlLnRlc3QwDQYJKoZIhvcNAQELBQADggIBAMvG3QPZ9sut29dH -U+aTUQr8t5PO6hNdYBvXc6EqnpP06Z06eJ8wQwNSajwPxZMwd0yKEDq2IajZbtXs -wmPoSEwy1BHkNUmEw2m5hwfzLiN+KI9QYwPSxdfx8mf29uxkfXt/e/FiGNKrG0h7 -Cv/vLPfvT6AtiScPV0Ic7HwpC2QvdLVYIJAbfKjD3sKG/8rfoeyW3a2UF9qBqvhQ -oJvnnIOqHfX/et18rucuEK1ZnBt0ObTU7uPWF89NK5pA/UCYiyjZERHxYotqrVnx -63xXdEMSiXi6fA811i5pC946gOxwrxjnUfgZ127pmZ4k4AggNTw8W1KA9MfhBfmD -9yPbtGqeFzETGyZRw0i+MJ6FWF6s1WXRq2U3JjtnVO2z7Dchb4cYTmLfMFqDNYse -pAUrKeezY34qJ3ktrjyEcNBMsmZVDGHlfPRPAuMnYHelIvdNVrmHgU8Ry39sSlap -ow2nLjP5xFFvS4dLpx1s7pLlExb4ZG+BTEyEa18nVCaXCZSK51uXshwL3OLWnuzl -JBMFOuw3JtR02XZ/EUdgJG/Js8B1zAp8YuBwgx9uyh3WHSBfUFRPSz68h/zn6MP8 -y6dGvgl7tShru6KQi7GiboeEzA8brn5y3UDmXG9/Wz6zCKagoks6otwdqfTo02Sp -bK+R/OrA/rXS4jVp5vupXswrgmdh +AgMBAAGgADANBgkqhkiG9w0BAQsFAAOCAgEApsfWctJGSTDRVr/9aiYk8IGYlYLc +xsUJ3FjsT2hmKiAnBrRi2JICTU3HXp0rBlclsWsO0Oc/XyKyfHHxH/t4efDWVo49 +qM1KaAilqFVG1rNI+E7jFfheosgmV19xOBhI5rEL0vWraumEr8DSj76+Em0dbvev 
+f+dUV5cwbV9B9eAvymR0wvv2kr2zHl4ExdAr1KzIKH/juiVJID1SfQcAxSKLgVU+ +2z09R84EWI495+UZBUNlcQPANh+R7VsrKrSBYuP9ZFioZkWNwMDFsGCOlyqYu9kb +QhikKpUIBPmOYmcDd7PsqtmJWD/jfn6tPoR3KdP70F4008boG5h4Jcu3syP32QiX +TNb7Fk/EygTCaGrSsynoc0LmF40kJ5xt+hKNAjh8L52MOcMjYdthFKz/V4XJ/xJf +up3MoMRl32II2hqO7t4vHvKFanfHB91M5mLq3H6ZI8amNORZyf5K1mNDj4eOnPxT +d5v1KHFYGmpMaVdYGCN8IXJCShI4gC+BnCZUjHiGuhOosmNLjDVwg1gpUI2kDa0D +rVZVCTwE1ugFGic34VTxD6OmuRGZmoeLSl6cfc/NC+tpWfT+1d4lTAWa/F0pL5yX +PVu0CvIqV6PfAw+f2IAts1HZW3chEX7+TYB9oJplQk/HbJKHWb5j6No1Im0eSuSt +xKxoKTlLRyxpRJQ= -----END CERTIFICATE REQUEST----- diff --git a/e2e/test/framework/cluster.go b/e2e/test/framework/cluster.go deleted file mode 100644 index e40676d2..00000000 --- a/e2e/test/framework/cluster.go +++ /dev/null @@ -1,9 +0,0 @@ -package framework - -func CreateCluster(cluster, region, k8s_version string) error { - return RunScript("create_cluster.sh", ApiToken, cluster, Image, k8s_version, region) -} - -func DeleteCluster(clusterName string) error { - return RunScript("delete_cluster.sh", clusterName) -} diff --git a/e2e/test/framework/framework.go b/e2e/test/framework/framework.go deleted file mode 100644 index a54491e2..00000000 --- a/e2e/test/framework/framework.go +++ /dev/null @@ -1,90 +0,0 @@ -package framework - -import ( - "fmt" - "time" - - "github.com/appscode/go/crypto/rand" - "github.com/linode/linodego" - "k8s.io/client-go/kubernetes" - "k8s.io/client-go/rest" -) - -var ( - Image = "linode/linode-cloud-controller-manager:latest" - ApiToken = "" - Timeout time.Duration - - KubeConfigFile = "" - TestServerResourceName = "e2e-test-server-" + rand.Characters(5) -) - -const ( - MaxRetry = 100 - TestServerImage = "appscode/test-server:2.3" -) - -type Framework struct { - restConfig *rest.Config - kubeClient kubernetes.Interface - namespace string - name string - - linodeClient linodego.Client -} - -func generateNamespaceName() string { - return rand.WithUniqSuffix("ccm") -} - -func New( - restConfig *rest.Config, - kubeClient kubernetes.Interface, - linodeClient linodego.Client, -) *Framework { - return &Framework{ - restConfig: restConfig, - kubeClient: kubeClient, - linodeClient: linodeClient, - - name: "cloud-controller-manager", - namespace: generateNamespaceName(), - } -} - -func (f *Framework) Invoke() *Invocation { - r := &rootInvocation{ - Framework: f, - app: rand.WithUniqSuffix("csi-driver-e2e"), - } - return &Invocation{ - rootInvocation: r, - LoadBalancer: &lbInvocation{rootInvocation: r}, - } -} - -func (f *Framework) Recycle() error { - if err := f.DeleteNamespace(); err != nil { - return fmt.Errorf("failed to delete namespace (%s)", f.namespace) - } - - f.namespace = generateNamespaceName() - if err := f.CreateNamespace(); err != nil { - return fmt.Errorf("failed to create namespace (%s)", f.namespace) - } - return nil -} - -type Invocation struct { - *rootInvocation - LoadBalancer *lbInvocation -} - -type rootInvocation struct { - *Framework - app string -} - -type lbInvocation struct { - *rootInvocation -} diff --git a/e2e/test/framework/loadbalancer_suite.go b/e2e/test/framework/loadbalancer_suite.go deleted file mode 100644 index d5a6d186..00000000 --- a/e2e/test/framework/loadbalancer_suite.go +++ /dev/null @@ -1,86 +0,0 @@ -package framework - -import ( - "context" - "fmt" - - "github.com/linode/linodego" -) - -func (i *lbInvocation) GetNodeBalancerFromService(svcName string, checkIP bool) (*linodego.NodeBalancer, error) { - ingress, err := i.getServiceIngress(svcName, i.Namespace()) - if err != nil { - return nil, err - } - 
hostname := ingress[0].Hostname - ip := ingress[0].IP - nbList, errListNodeBalancers := i.linodeClient.ListNodeBalancers(context.Background(), nil) - if errListNodeBalancers != nil { - return nil, fmt.Errorf("Error listingNodeBalancer for hostname %s: %s", hostname, errListNodeBalancers.Error()) - } - - for _, nb := range nbList { - if *nb.Hostname == hostname { - if checkIP { - if *nb.IPv4 == ip { - return &nb, nil - } else { - return nil, fmt.Errorf("IPv4 for Nodebalancer (%s) does not match IP (%s) for service %v", *nb.IPv4, ip, svcName) - } - } - return &nb, nil - } - } - return nil, fmt.Errorf("no NodeBalancer Found for service %v", svcName) -} - -func (i *lbInvocation) GetNodeBalancerID(svcName string) (int, error) { - nb, err := i.GetNodeBalancerFromService(svcName, false) - if err != nil { - return -1, err - } - return nb.ID, nil -} - -func (i *lbInvocation) GetNodeBalancerConfig(svcName string) (*linodego.NodeBalancerConfig, error) { - id, err := i.GetNodeBalancerID(svcName) - if err != nil { - return nil, err - } - nbcList, err := i.linodeClient.ListNodeBalancerConfigs(context.Background(), id, nil) - if err != nil { - return nil, err - } - return &nbcList[0], nil -} - -func (i *lbInvocation) GetNodeBalancerConfigForPort(svcName string, port int) (*linodego.NodeBalancerConfig, error) { - id, err := i.GetNodeBalancerID(svcName) - if err != nil { - return nil, err - } - nbConfigs, err := i.linodeClient.ListNodeBalancerConfigs(context.Background(), id, nil) - if err != nil { - return nil, err - } - - for _, config := range nbConfigs { - if config.Port == port { - return &config, nil - } - } - return nil, fmt.Errorf("NodeBalancerConfig for port %d was not found", port) -} - -func (i *lbInvocation) GetNodeBalancerUpNodes(svcName string) (int, error) { - id, err := i.GetNodeBalancerID(svcName) - if err != nil { - return 0, err - } - nbcList, err := i.linodeClient.ListNodeBalancerConfigs(context.Background(), id, nil) - if err != nil { - return 0, err - } - nb := &nbcList[0] - return nb.NodesStatus.Up, nil -} diff --git a/e2e/test/framework/namespace.go b/e2e/test/framework/namespace.go deleted file mode 100644 index c95207d6..00000000 --- a/e2e/test/framework/namespace.go +++ /dev/null @@ -1,26 +0,0 @@ -package framework - -import ( - "context" - - core "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -func (f *Framework) Namespace() string { - return f.namespace -} - -func (f *Framework) CreateNamespace() error { - obj := &core.Namespace{ - ObjectMeta: metav1.ObjectMeta{ - Name: f.namespace, - }, - } - _, err := f.kubeClient.CoreV1().Namespaces().Create(context.TODO(), obj, metav1.CreateOptions{}) - return err -} - -func (f *Framework) DeleteNamespace() error { - return f.kubeClient.CoreV1().Namespaces().Delete(context.TODO(), f.namespace, deleteInForeground()) -} diff --git a/e2e/test/framework/node.go b/e2e/test/framework/node.go deleted file mode 100644 index 2ac0ad55..00000000 --- a/e2e/test/framework/node.go +++ /dev/null @@ -1,26 +0,0 @@ -package framework - -import ( - "context" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -const ( - masterLabel = "node-role.kubernetes.io/master" -) - -func (i *Invocation) GetNodeList() ([]string, error) { - workers := make([]string, 0) - nodes, err := i.kubeClient.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{}) - if err != nil { - return nil, err - } - - for _, node := range nodes.Items { - if _, found := node.ObjectMeta.Labels[masterLabel]; !found { - workers = append(workers, node.Name) - } - } 
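
The worker filter in the deleted `node.go` keys off the legacy `node-role.kubernetes.io/master` label; newer Kubernetes releases label control-plane nodes `node-role.kubernetes.io/control-plane` instead. A version-tolerant variant would check both (a standalone sketch, not part of the framework):

```go
// Sketch: filter out control-plane nodes under both the legacy and the
// current node-role labels.
package framework

import (
	core "k8s.io/api/core/v1"
)

const (
	legacyMasterLabel = "node-role.kubernetes.io/master"
	controlPlaneLabel = "node-role.kubernetes.io/control-plane"
)

// isWorker reports whether the node carries neither control-plane label.
func isWorker(node core.Node) bool {
	_, master := node.Labels[legacyMasterLabel]
	_, controlPlane := node.Labels[controlPlaneLabel]
	return !master && !controlPlane
}
```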
- return workers, nil -} diff --git a/e2e/test/framework/pod.go b/e2e/test/framework/pod.go deleted file mode 100644 index 46f307d7..00000000 --- a/e2e/test/framework/pod.go +++ /dev/null @@ -1,56 +0,0 @@ -package framework - -import ( - "context" - - core "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -func (i *lbInvocation) GetPodObject(podName, image string, ports []core.ContainerPort, labels map[string]string) *core.Pod { - return &core.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: podName, - Namespace: i.Namespace(), - Labels: labels, - }, - Spec: core.PodSpec{ - Containers: []core.Container{ - { - Name: "server", - Image: image, - Env: []core.EnvVar{ - { - Name: "POD_NAME", - ValueFrom: &core.EnvVarSource{ - FieldRef: &core.ObjectFieldSelector{ - FieldPath: "metadata.name", - }, - }, - }, - }, - Ports: ports, - }, - }, - }, - } -} - -func (i *lbInvocation) SetNodeSelector(pod *core.Pod, nodeName string) *core.Pod { - pod.Spec.NodeSelector = map[string]string{ - "kubernetes.io/hostname": nodeName, - } - return pod -} - -func (i *lbInvocation) CreatePod(pod *core.Pod) (*core.Pod, error) { - return i.kubeClient.CoreV1().Pods(i.Namespace()).Create(context.TODO(), pod, metav1.CreateOptions{}) -} - -func (i *lbInvocation) DeletePod(name string) error { - return i.kubeClient.CoreV1().Pods(i.Namespace()).Delete(context.TODO(), name, deleteInForeground()) -} - -func (i *lbInvocation) GetPod(name, ns string) (*core.Pod, error) { - return i.kubeClient.CoreV1().Pods(ns).Get(context.TODO(), name, metav1.GetOptions{}) -} diff --git a/e2e/test/framework/secret.go b/e2e/test/framework/secret.go deleted file mode 100644 index 8e910fd4..00000000 --- a/e2e/test/framework/secret.go +++ /dev/null @@ -1,116 +0,0 @@ -package framework - -import ( - "context" - - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -const ( - serverCert = `-----BEGIN CERTIFICATE----- -MIIFcjCCA1qgAwIBAgIJAM2X8k3I/mF6MA0GCSqGSIb3DQEBCwUAMFIxCzAJBgNV -BAYTAkFVMRMwEQYDVQQIDApTb21lLVN0YXRlMSEwHwYDVQQKDBhJbnRlcm5ldCBX -aWRnaXRzIFB0eSBMdGQxCzAJBgNVBAMMAmNhMB4XDTIwMTEwOTE3NDU1NFoXDTI0 -MTAxOTE3NDU1NFowcjELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAlBBMRUwEwYDVQQH -DAxQaGlsYWRlbHBoaWExEzARBgNVBAoMCkxpbm9kZSBMTEMxFDASBgNVBAsMC0xp -bm9kZSBUZXN0MRQwEgYDVQQDDAtsaW5vZGUudGVzdDCCAiIwDQYJKoZIhvcNAQEB -BQADggIPADCCAgoCggIBANuycV24k/tGqbUrxNrhWpCUH/qsE+ZhhQjWkBUQQ3cc -S01BelMoJWX5U/SnVbSfZJrMmY/b4RZ8bM1p6FCtWP67pzmE3yZTQVadvHzuNx61 -LWhxQmZ3L7tAuE3XNPe9tadqT+Z4YF7/AbWXgwFWP4Y0iBoD9dVjS0lj9nmZ+Pqu -Ia+4lKlai0WFc+XuPPW9bBF6Qokg1rquhlkc7xAarh19BfNplvOAzH2tk63BuG8N -ItK+JylnIAd2re7Ed9g7xDxbBYhK3hYDbMe29SnBvp4MwN2Si/W74kCki445+PFT -6Ff8OZU9z2BiAI8LtrWzWPCenVHJ4uGh5yMIcARVW+dvfXiBAp3HLnlRMcPC7VwW -DlCk+APHaCst2QauAflmPDdEIROh/Lbi6wJeiFIfK3brryXN/PkCAw+iCq+fM7nr -LeqtP5Tb467jn1DGvhLAT1+/ecMg7qi+LVMn8kslMmDV3EOIqdpxUwWF6Ae+Yy7F -x1C4TgvuwG8c1njO+po3jYCs1CWs/tRFDC2DkrH4mBTJVn6Ii4pX+1jHUd2yx3HB -QAP0INwmRE1PwdtGpmAyP/tE72NbXdNYM6FMd1IwzwepqZ7YKw6EyEgzQL2YDzrf -Xm7OwBDOv4h4CELQJwkFC080OPIt1mZaT2G9iHljrDQl4jmTzfkk13nS+TNyFMFH -AgMBAAGjKzApMCcGA1UdEQQgMB6CC2xpbm9kZS50ZXN0gg93d3cubGlub2RlLnRl -c3QwDQYJKoZIhvcNAQELBQADggIBAEzwYvkEfTZJYQQTeLGdc9cjnDfhN1qYzgPY -eEYmU0V6bsKEqnJwZWl8vGVoOYY7Coe46lNTWMlYjNk3Of/qRCHiAiblGEUoMG24 -79tEaseLE28P4UDzLPQeO+z5JOdVkyKs++Q6o1Or9oQIFdtbRIKXwATEUgJSiMq7 -fyoDlWxFWNQ1bTJe6l+0rbZpIHpxbLnIimZUzgrYVlXHnPS2TSSLPo9vbxvvLFTJ -T6CF5fgdxkLq8NVbTHni/YjBMzk4q2D0WaoPO1Oeq+fAts+OrPDZMfXWmp4s7nPw -63cptWnY+5F30xdQFeXOSphyn2MZy6kZE0P4mU9rQaMyzOA2mA3QJTUhm2iB5K6+ 
-kzv9ydXuX2j0X9dapAAKrxJtGuzbkpvmsL3G3x0rfS9TrKhugu5x+EvlsZcx92Xr -rFSF2tokIg+T/Rev3YNdrq0wHLYJTjAvS5+HWhGCNN9gdaGqkpx1g7BWExl8xgeo -nsLWeEMyQfEOrZyEloTmzAW9qN1qilmR2keQpzvS1PuhMnNxh+eBQyRfeOatwyPQ -ofGA0ZvUwXnAQH/+G1MmYOKkxDCKi+5oMvhaNenQjj2KSwftgQUhSeCV9aAQu9Fa -WpBrglz+S4SPWetrRGfnrxBznReY507ZSdzzMhZEXb+YPWjLd/bFPIiniD+U4a5Z -7gxpALyz ------END CERTIFICATE-----` - serverKey = `-----BEGIN RSA PRIVATE KEY----- -MIIJKAIBAAKCAgEA27JxXbiT+0aptSvE2uFakJQf+qwT5mGFCNaQFRBDdxxLTUF6 -UyglZflT9KdVtJ9kmsyZj9vhFnxszWnoUK1Y/runOYTfJlNBVp28fO43HrUtaHFC -Zncvu0C4Tdc09721p2pP5nhgXv8BtZeDAVY/hjSIGgP11WNLSWP2eZn4+q4hr7iU -qVqLRYVz5e489b1sEXpCiSDWuq6GWRzvEBquHX0F82mW84DMfa2TrcG4bw0i0r4n -KWcgB3at7sR32DvEPFsFiEreFgNsx7b1KcG+ngzA3ZKL9bviQKSLjjn48VPoV/w5 -lT3PYGIAjwu2tbNY8J6dUcni4aHnIwhwBFVb5299eIECnccueVExw8LtXBYOUKT4 -A8doKy3ZBq4B+WY8N0QhE6H8tuLrAl6IUh8rduuvJc38+QIDD6IKr58zuest6q0/ -lNvjruOfUMa+EsBPX795wyDuqL4tUyfySyUyYNXcQ4ip2nFTBYXoB75jLsXHULhO -C+7AbxzWeM76mjeNgKzUJaz+1EUMLYOSsfiYFMlWfoiLilf7WMdR3bLHccFAA/Qg -3CZETU/B20amYDI/+0TvY1td01gzoUx3UjDPB6mpntgrDoTISDNAvZgPOt9ebs7A -EM6/iHgIQtAnCQULTzQ48i3WZlpPYb2IeWOsNCXiOZPN+STXedL5M3IUwUcCAwEA -AQKCAgBgau3p7cm0K4zrX+wjC2fNr9RhFQgewYm7GT9enyacraQ2oZfnyuSu3j+E -TbQFczaZ4VU7l4ovbifp9qLoVUuLcBux2Kh+j2dLdip0wa8bIPRus9YqVgBys7Kv -JtWuLGn+sV+jjAzvZAcCBR6PhaSXZ5KbqEVJgyxVZzOSpopoqedK0T0dHgmlVy5I -KMhEKP+2o+tzdyAGCfYYQeSBMtRbSLVF4H9JGqukNHttdGlXA3LW/nD9cK7T17f5 -4+uc0I4M1v2UlRbmnlYtSBRMYSUhBAPYuioGjJB9QjmlD7g7YVHE24MCBoBuklQg -c0macL2FzHbKoEmcMIvaCifvHu8X0J5qjZghmi7Zozh/Skg9B4XINdHpX7vX7INZ -A7z2nx5x4xaNPO3hJJJkbpCcpSIEQkuqe8a/GYcn0tTMTqoGXr/OFz+ut1ZzZThs -YL8YWh2SqVOzR8xJE3cR9qd/ISTl1CPrxWyWm3eOZ0WGOKZTzUIN3p8gcDIDucs4 -kXGDCh7tj7EsYWpa0fnEp5n8kupLWPY050aal898xPP4RDNQFx/VdDBfa/PVKKMy -OzXFq801UoOdF9d6FR3p3YS5O0Zd8UILJQui3s2dpY6/BzuWa2ch9PwvEFI8rsT6 -8VxRCEG9gJxA/GSV/ZNU4hH3Tiv7fSG/aED/uUSvI/t7AWgQgQKCAQEA+Xrshwnt -Cp0cDdkHde/0WnT3DUEvYM0tlJY6z1YR5Kx0GL4zR+yhBuTfmgCMsbkNLvHsc3Us -UbwM4OSAD0oHMa6LCYer6fiYWfv4c19gCtLCZhjBPYHSwXGaQxdjiEE4N6J+mnPW -n39DCjXhl//WlatbLkZRbGYnbORfcE2Kx72OAJt2ujp0Jr/Loi1px6KMbKnzhEhy -mI6FPejx1h8KC5xlCq6faUnal1ZvdNc5WkxtZ1YOCzaKbVuGEok3bFK986aSYYlP -AI4SMo0M/Sy/5tlb9CL5H8s4Dbz35CRyKmXYMQYeGtJ/7HTSdrU7qcp4EZTu5RVX -1xtq6S+w4/V3JwKCAQEA4XBDaxw2B5ica9xxTAzzq7H9QtGgtYaBIQmkBVqVvoDs -ywGbe7ueJFY7id2rWdeDB7Nxt6feoTuoyXmA3YYAeUBQZGtLKc3MZfdIFJt6yM1D -6FZyITwo0Zl6ShPxIYsc94BRA7YzmQWaucByrRFLX+y463u2UGqD9s3aPZm921mb -oweIkEQiD2lJNqhx0gRphN+Le+0z7Gh+1ZxI8XikSIkuQ+nvuh5zQA/lqmWr4E9m -EICTP6D5lvJj3EpKZ1pUgHvPEy/fyUq+i7nu0hS394blI6amv2iwmrLhe2NafCHu -+Nux305uO8jqHzEl+l1CvGf0BqNXCM3x5CgLMJW44QKCAQBpmRpc3lqzT2T8h4yc -4wBu+WtI9Pp04uQULLKf6DKStFw/zOIv430VSfNLYEgtQcLOyB/pjwM/ZXWeC5oY -3qDE6rh3RDIESvFRxVGYpBom+qbGSFwjCLyInOlK1K+QkOqWwfUMs1N5F4js3Xmr -uOK/X1Ss9Z6pX2P4t4GeK3Q+r4FXyHYsxWk8rZon/0jy81608ArfRzsaT9keJ2eV -1nWODJjIOLnI+zXHMRLkReVEz2zPfKFdJazaNQ8+8U3AUBWO+EalelUySvBw7Ts+ -Pp7Lu90sLVF9n6sORZo3uyWHxKwJtCkx+T+kep5LGNM0PzsrVfr4hFw19KkAIuug -0dmpAoIBAQCbbix9b+DskdLfJwjSV2e1bC1iYWe9YDQtlBkLO+5cf0VDniMWRz/8 -a5v3LOdUNRt5NsZjypDbd2ejKWuo0BgJgUcsRTF4bBTOBJUk6CHaynNUgC2GLpUy -FfBTnLY221QobMbumTOwAEYyZbZrDq56P5sreIs1nIrJohojOJnG31xIJgyI8wDM -wVmiHrcDBtm9q+belaekClPQcUV1fyk9fZ9xYZxQJWhutccyGZFMQVHsdMmRKCqN -YSdqnan44jW6tCIMZ4iSnz8K1TIMlA5W0iGv19nFxKdmsYh26wRa64Z4+/gCL3Af -NiH9SYSWvrAheEauQPXj8yIgnV9BqyjhAoIBAA0NGugiXqloQD4tKFYROZ2rm1kx -IlbC5rVePSeMz59Qty79dODAvGuJxOb/vKOlQqcULfgidpctBdtZJ/oencwOf/49 -e0R5uYpvsxyvAro5OKxk0SD2YSgkdBf8gF5+opG6ZjcBcRk3jp8cdYDTIpViJco5 -IJwbMqoWpJxuilj0imxDNQPPoN6yf3mkD2tyYp2YL9X5bgSB58l1LCBJDdJDC4tR 
-rrXq0Btn9jpwwW/AJ6mIFWWGQKDpkGhLRHxOOK4dC+XgbkEogDSOlZDOEALLvFI9 -OVIIxvytGW/Qy6AEzsMnsTPUJMyPsktCQ2YI628dytmqXOniZe1QQ2R7dzw= ------END RSA PRIVATE KEY-----` -) - -func (i *lbInvocation) CreateTLSSecret(secretName string) (err error) { - secret := &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: secretName, - }, - Data: map[string][]byte{ - corev1.TLSCertKey: []byte(serverCert), - corev1.TLSPrivateKeyKey: []byte(serverKey), - }, - Type: corev1.SecretTypeTLS, - } - - _, err = i.kubeClient.CoreV1().Secrets(i.Namespace()).Create(context.TODO(), secret, metav1.CreateOptions{}) - - return err -} - -func (i *lbInvocation) DeleteSecret(name string) error { - err := i.kubeClient.CoreV1().Secrets(i.Namespace()).Delete(context.TODO(), name, metav1.DeleteOptions{}) - return err -} diff --git a/e2e/test/framework/service.go b/e2e/test/framework/service.go deleted file mode 100644 index e1c1d8be..00000000 --- a/e2e/test/framework/service.go +++ /dev/null @@ -1,137 +0,0 @@ -package framework - -import ( - "context" - "fmt" - - core "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/watch" - "k8s.io/client-go/util/retry" -) - -func (i *lbInvocation) createOrUpdateService(selector, annotations map[string]string, ports []core.ServicePort, isSessionAffinityClientIP, isCreate bool) error { - var sessionAffinity core.ServiceAffinity = "None" - if isSessionAffinityClientIP { - sessionAffinity = "ClientIP" - } - svc := &core.Service{ - ObjectMeta: metav1.ObjectMeta{ - Name: TestServerResourceName, - Namespace: i.Namespace(), - Annotations: annotations, - Labels: map[string]string{ - "app": "test-server-" + i.app, - }, - }, - Spec: core.ServiceSpec{ - Ports: ports, - Selector: selector, - Type: core.ServiceTypeLoadBalancer, - SessionAffinity: sessionAffinity, - }, - } - - service := i.kubeClient.CoreV1().Services(i.Namespace()) - if isCreate { - _, err := service.Create(context.TODO(), svc, metav1.CreateOptions{}) - if err != nil { - return err - } - } else { - if err := retry.RetryOnConflict(retry.DefaultRetry, func() error { - options := metav1.GetOptions{} - resource, err := service.Get(context.TODO(), TestServerResourceName, options) - if err != nil { - return err - } - svc.ObjectMeta.ResourceVersion = resource.ResourceVersion - svc.Spec.ClusterIP = resource.Spec.ClusterIP - _, err = service.Update(context.TODO(), svc, metav1.UpdateOptions{}) - return err - }); err != nil { - return err - } - } - return nil -} - -func (i *lbInvocation) GetServiceWatcher() (watch.Interface, error) { - var timeoutSeconds int64 = 30 - watcher, err := i.kubeClient.CoreV1().Events(i.Namespace()).Watch(context.TODO(), metav1.ListOptions{ - FieldSelector: "involvedObject.kind=Service", - Watch: true, - TimeoutSeconds: &timeoutSeconds, - }) - if err != nil { - return nil, err - } - return watcher, nil -} - -func (i *lbInvocation) GetService() (*core.Service, error) { - return i.kubeClient.CoreV1().Services(i.Namespace()).Get(context.TODO(), TestServerResourceName, metav1.GetOptions{}) -} - -func (i *lbInvocation) CreateService(selector, annotations map[string]string, ports []core.ServicePort, isSessionAffinityClientIP bool) error { - return i.createOrUpdateService(selector, annotations, ports, isSessionAffinityClientIP, true) -} - -func (i *lbInvocation) UpdateService(selector, annotations map[string]string, ports []core.ServicePort, isSessionAffinityClientIP bool) error { - err := i.deleteEvents() - if err != nil { - return err - } - return i.createOrUpdateService(selector, 
annotations, ports, isSessionAffinityClientIP, false) -} - -func (i *lbInvocation) DeleteService() error { - return i.kubeClient.CoreV1().Services(i.Namespace()).Delete(context.TODO(), TestServerResourceName, metav1.DeleteOptions{}) -} - -func (i *lbInvocation) GetServiceEndpoints() ([]core.EndpointAddress, error) { - ep, err := i.kubeClient.CoreV1().Endpoints(i.Namespace()).Get(context.TODO(), TestServerResourceName, metav1.GetOptions{}) - if err != nil { - return nil, err - } - if len(ep.Subsets) == 0 { - return nil, fmt.Errorf("No service endpoints found for %s", TestServerResourceName) - } - return ep.Subsets[0].Addresses, err -} - -func (i *lbInvocation) deleteEvents() error { - return i.kubeClient.CoreV1().Events(i.Namespace()).DeleteCollection(context.TODO(), metav1.DeleteOptions{}, metav1.ListOptions{FieldSelector: "involvedObject.kind=Service"}) -} - -func (i *lbInvocation) GetLoadBalancerIps() ([]string, error) { - svc, err := i.kubeClient.CoreV1().Services(i.Namespace()).Get(context.TODO(), TestServerResourceName, metav1.GetOptions{}) - if err != nil { - return nil, err - } - var serverAddr []string - for _, ingress := range svc.Status.LoadBalancer.Ingress { - if len(svc.Spec.Ports) > 0 { - for _, port := range svc.Spec.Ports { - if port.NodePort > 0 { - serverAddr = append(serverAddr, fmt.Sprintf("%s:%d", ingress.IP, port.Port)) - } - } - } - } - if serverAddr == nil { - return nil, fmt.Errorf("failed to get Status.LoadBalancer.Ingress for service %s/%s", TestServerResourceName, i.Namespace()) - } - return serverAddr, nil -} - -func (i *lbInvocation) getServiceIngress(name, namespace string) ([]core.LoadBalancerIngress, error) { - svc, err := i.kubeClient.CoreV1().Services(namespace).Get(context.TODO(), name, metav1.GetOptions{}) - if err != nil { - return nil, err - } - if svc.Status.LoadBalancer.Ingress == nil { - return nil, fmt.Errorf("Status.LoadBalancer.Ingress is empty for %s", name) - } - return svc.Status.LoadBalancer.Ingress, nil -} diff --git a/e2e/test/framework/util.go b/e2e/test/framework/util.go deleted file mode 100644 index 31379256..00000000 --- a/e2e/test/framework/util.go +++ /dev/null @@ -1,180 +0,0 @@ -package framework - -import ( - "context" - "crypto/tls" - "crypto/x509" - "io" - "log" - "net" - "net/http" - "os" - "os/exec" - "path" - "strings" - "time" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -const ( - scriptDirectory = "scripts" - RetryInterval = 5 * time.Second - RetryTimeout = 15 * time.Minute - caCert = `-----BEGIN CERTIFICATE----- -MIIFejCCA2KgAwIBAgIJAN7D2Ju254yUMA0GCSqGSIb3DQEBCwUAMFIxCzAJBgNV -BAYTAkFVMRMwEQYDVQQIDApTb21lLVN0YXRlMSEwHwYDVQQKDBhJbnRlcm5ldCBX -aWRnaXRzIFB0eSBMdGQxCzAJBgNVBAMMAmNhMB4XDTE5MDQwOTA5MzYxNFoXDTI5 -MDQwNjA5MzYxNFowUjELMAkGA1UEBhMCQVUxEzARBgNVBAgMClNvbWUtU3RhdGUx -ITAfBgNVBAoMGEludGVybmV0IFdpZGdpdHMgUHR5IEx0ZDELMAkGA1UEAwwCY2Ew -ggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDoTwE1kijjrhCcGXSPyHlf -7NngxPCFuFqVdRvG4DrrdL7YW3iEovAXTbuoyiPpF/U9T5BfDVs2dCEHGlpiOADR -tA/Z5mFbVcefOCBL+rL2sTN2o19U7eimcZjH1xN1L5j2RkYmRAoI+nwG/g5NehOu -YM930oPqe3vOYevOHBCebHuKc7zaM31AtKcDG0IjIJ1ZdJy91+rx8Prb+IxTIKZl -Ca/e0e6iZWCPp5kaJyNUGZkjjcRVzFM79xVf34DEuS+N1RZP7EevM0bfHehJfSpU -M6gfsrL9WctD0nGJd2YsH9hLCub2G7emgiV7dvN1R0QW9ijguwZ9aBemiat5AnGs -QHSR+WRijZNjHTWY4DEaTNWecDd2Tz37RNN9Ow8FThERwZVnpji1kcijEg4g7Ppy -9P6tdavjkFVW0xOieInjS/m5Bxj2a44UT1JshNr1M4HGXvqUcCFS4vhytIc05lOv -X20NR+C+RgNy7G14Hz/3+qRo9hlkonyTJAoU++2vgsaNmmhcU6fGgYpARHm1Y675 -pGrgZAcjFcsG84q0dSdr6AeY+6+1UyS6pktBobXIiciSPmseHJ24dRd06OYQMxQ3 
-ccOZhZ3cNy8OMT9eUwcjnif36BVmZdCObJexqXq/cSVX3IhhaQhLLfN9ZyGDkxWl -N5ehRMCabgv3mQCDd/9HMwIDAQABo1MwUTAdBgNVHQ4EFgQUC2AMOf90/zpuQ588 -rPLfe7EukIUwHwYDVR0jBBgwFoAUC2AMOf90/zpuQ588rPLfe7EukIUwDwYDVR0T -AQH/BAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAgEAHopjHkeIciVtlAyAPEfh/pnf -r91H1aQMPmHisqlveM3Bz9MOIa9a26YO+ZzCPozALxkJIjdp7L3L8Q8CuLmkC4YV -6nHvSLaC/82UGoiRGyjdFh30puqekWMZ62ZrQLpCr0DzOJrarslLM0fONqpjDTWP -8OXyRcnVSbFB1n5XUoviMTTxYOQ3HQe8b3Tt7GO/9w6dWkkSX1Vy4RmzNt7fb9K5 -mxu/n+SVu+2iQX9oEWq2rpvsD3RGnhewCPlZU8NQYKb72K00kEcG/J+WU1IPtkq0 -JaU5TDMMzfp3PMYxCzYD9pdM8J0N0zJac2t9hkx7H83jy/TfLrmDvB6nCK8N3+6j -8In6RwYw4XJ41AWsJpGXBpvYCq5GJjdogEi9IaBXSmtVPYm0NURYbephk+Wg0oyk -ESk4cyWUhYG8mcMyORc8lzOQ79YT6A5QnitTGCVQGTlnNRjevtfhAFEXr9e8UZFq -oWtfEdltH6ElGDpivwuOERAN9v3GoPlifpo1UDElnPJft+C0cRv0YpPwvwJTy1MU -q1op/4Z/7SHzFWTSyRZqvI41AsLImylzfZ0w9U8sogd4pHv30kGc9+LhqrsfLDvK -9XedVoWJx/x3i8BUhVDyd4FyVWHCf9N/6a9HzbFWT8QZTBk5pErTaFiTi5TQxoi7 -ER4ILjvRX7mLWUGhN58= ------END CERTIFICATE-----` - Domain = "linode.test" -) - -func RunScript(script string, args ...string) error { - wd, err := os.Getwd() - if err != nil { - return err - } - - return runCommand(path.Join(wd, scriptDirectory, script), args...) -} - -func runCommand(cmd string, args ...string) error { - c := exec.Command(cmd, args...) - c.Stdout = os.Stdout - c.Stderr = os.Stderr - log.Printf("Running command %q\n", cmd) - return c.Run() -} - -func deleteInForeground() metav1.DeleteOptions { - policy := metav1.DeletePropagationForeground - graceSeconds := int64(0) - return metav1.DeleteOptions{ - PropagationPolicy: &policy, - GracePeriodSeconds: &graceSeconds, - } -} - -func getHTTPSResponse(domain, ip, port string) (string, error) { - rootCAs, _ := x509.SystemCertPool() - if rootCAs == nil { - rootCAs = x509.NewCertPool() - } - - if ok := rootCAs.AppendCertsFromPEM([]byte(caCert)); !ok { - log.Println("No certs appended, using system certs only") - } - - config := &tls.Config{ - RootCAs: rootCAs, - } - - dialer := &net.Dialer{ - Timeout: 30 * time.Second, - KeepAlive: 30 * time.Second, - DualStack: true, - } - dialContext := func(ctx context.Context, network, addr string) (net.Conn, error) { - if addr == domain+":"+port { - addr = ip + ":" + port - } - return dialer.DialContext(ctx, network, addr) - } - - tr := &http.Transport{ - TLSClientConfig: config, - DialContext: dialContext, - } - client := &http.Client{Transport: tr} - - log.Println("Waiting for response from https://" + ip + ":" + port) - u := "https://" + domain + ":" + port - req, err := http.NewRequest(http.MethodGet, u, nil) - if err != nil { - return "", err - } - resp, err := client.Do(req) - if err != nil { - return "", err - } - defer resp.Body.Close() - - bodyBytes, err := io.ReadAll(resp.Body) - if err != nil { - return "", err - } - bodyString := string(bodyBytes) - - return bodyString, nil -} - -func WaitForHTTPSResponse(link string) (string, error) { - hostPort := strings.Split(link, ":") - host, port := hostPort[0], hostPort[1] - - resp, err := getHTTPSResponse(Domain, host, port) - if err != nil { - return "", err - } - return resp, nil -} - -func getHTTPResponse(link string) (bool, string, error) { - resp, err := http.Get("http://" + link) - if err != nil { - return false, "", err - } - defer resp.Body.Close() - - bodyBytes, err := io.ReadAll(resp.Body) - if err != nil { - return false, "", err - } - return resp.StatusCode == 200, string(bodyBytes), nil -} - -func WaitForHTTPResponse(link string) (string, error) { - ok, resp, err := getHTTPResponse(link) - if err != nil { - return "", err 
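
The `getHTTPSResponse` helper in the deleted `util.go` rewrites `DialContext` so the request targets the NodeBalancer IP while certificate validation still sees the test hostname. The same effect can be had more directly with `tls.Config.ServerName`; a sketch under the same assumptions (the embedded test CA, and `domain` being `linode.test` as in the framework's `Domain` constant):

```go
// Alternative sketch to the DialContext rewrite: dial the IP directly and
// let ServerName carry the name the test certificate must validate against.
package framework

import (
	"crypto/tls"
	"crypto/x509"
	"fmt"
	"io"
)

// fetchViaTLS returns the raw response (headers included) from ip:port,
// validating the server certificate against domain using the given CA.
func fetchViaTLS(ip, port, domain string, ca []byte) (string, error) {
	pool := x509.NewCertPool()
	if !pool.AppendCertsFromPEM(ca) {
		return "", fmt.Errorf("failed to parse CA certificate")
	}
	conn, err := tls.Dial("tcp", ip+":"+port, &tls.Config{
		RootCAs:    pool,
		ServerName: domain, // validated against the cert's SANs, not the IP
	})
	if err != nil {
		return "", err
	}
	defer conn.Close()
	// Bare HTTP/1.0 request; the server closes the connection after replying.
	if _, err := fmt.Fprintf(conn, "GET / HTTP/1.0\r\nHost: %s\r\n\r\n", domain); err != nil {
		return "", err
	}
	body, err := io.ReadAll(conn)
	return string(body), err
}
```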
- } - if ok { - return resp, nil - } - return "", nil -} - -func GetResponseFromCurl(endpoint string) string { - resp, err := exec.Command("curl", "--max-time", "5", "-s", endpoint).Output() - if err != nil { - return "" - } - return string(resp) -} diff --git a/e2e/test/fw-use-specified-nb/chainsaw-test.yaml b/e2e/test/fw-use-specified-nb/chainsaw-test.yaml new file mode 100644 index 00000000..fb0a0148 --- /dev/null +++ b/e2e/test/fw-use-specified-nb/chainsaw-test.yaml @@ -0,0 +1,124 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + name: fw-use-specified-nb +spec: + bindings: + - name: fwname + value: (join('-', ['ccm-fwtest', env('CLUSTER_NAME')])) + namespace: "fw-use-specified-nb" + steps: + - name: Check if CCM is deployed + try: + - assert: + file: ../assert-ccm-resources.yaml + - name: Create firewall, Create pods and services + try: + - script: + env: + - name: FWLABEL + value: ($fwname) + content: | + set -e + + create_fw=$(curl -s --write-out "%{http_code}\n" --output /dev/null --request POST \ + -H "Authorization: Bearer $LINODE_TOKEN" \ + -H "Content-Type: application/json" \ + -H "accept: application/json" \ + "https://api.linode.com/v4/networking/firewalls" \ + --data " + { + \"label\": \"$FWLABEL\", + \"rules\": { + \"inbound\": [{ + \"action\": \"ACCEPT\", + \"label\": \"inbound-rule123\", + \"description\": \"inbound rule123\", + \"ports\": \"4321\", + \"protocol\": \"TCP\", + \"addresses\": { + \"ipv4\": [\"0.0.0.0/0\"] + } + }], + \"inbound_policy\": \"ACCEPT\", + \"outbound_policy\": \"ACCEPT\" + } + } + " + ) + + if [[ $create_fw == "200" ]]; then + echo "fw created" + fi + check: + ($error == null): true + (contains($stdout, 'fw created')): true + - apply: + file: create-pods-services.yaml + catch: + - describe: + apiVersion: v1 + kind: Pod + - describe: + apiVersion: v1 + kind: Service + - name: Check that loadbalancer ip is assigned + try: + - assert: + resource: + apiVersion: v1 + kind: Service + metadata: + name: svc-test + status: + (loadBalancer.ingress[0].ip != null): true + - name: Annotate service with nodebalancer id + try: + - script: + env: + - name: FWLABEL + value: ($fwname) + content: | + set -e + re='^[0-9]+$' + + fwid=$(curl -s \ + -H "Authorization: Bearer $LINODE_TOKEN" \ + -H "Content-Type: application/json" \ + -H "X-Filter: {\"label\": \"$FWLABEL\"}" \ + "https://api.linode.com/v4/networking/firewalls" | jq .data[].id) + + if ! 
[[ $fwid =~ $re ]]; then + echo "Firewall id [$fwid] is incorrect, failed to fetch firewall" + exit 1 + fi + + kubectl annotate svc svc-test -n $NAMESPACE service.beta.kubernetes.io/linode-loadbalancer-firewall-id=$fwid + sleep 5 + + for i in {1..10}; do + nbid=$(KUBECONFIG=$KUBECONFIG NAMESPACE=$NAMESPACE LINODE_TOKEN=$LINODE_TOKEN ../scripts/get-nb-id.sh) + + fwconfig=$(curl -s \ + -H "Authorization: Bearer $LINODE_TOKEN" \ + -H "Content-Type: application/json" \ + "https://api.linode.com/v4/networking/firewalls/$fwid" || true) + + fw_attached_to_nb=$(echo $fwconfig | jq ".entities[] | select(.id == $nbid) | .id == $nbid") + + if [[ $fw_attached_to_nb == "true" ]]; then + echo "Conditions met" + break + fi + + sleep 10 + done + + curl -s -X DELETE \ + -H "Authorization: Bearer $LINODE_TOKEN" \ + -H "Content-Type: application/json" \ + "https://api.linode.com/v4/networking/firewalls/$fwid" + check: + ($error == null): true + (contains($stdout, 'Conditions met')): true diff --git a/e2e/test/fw-use-specified-nb/create-pods-services.yaml b/e2e/test/fw-use-specified-nb/create-pods-services.yaml new file mode 100644 index 00000000..00113a2f --- /dev/null +++ b/e2e/test/fw-use-specified-nb/create-pods-services.yaml @@ -0,0 +1,48 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: fw-use-specified-nb + name: test +spec: + replicas: 1 + selector: + matchLabels: + app: fw-use-specified-nb + template: + metadata: + labels: + app: fw-use-specified-nb + spec: + containers: + - image: appscode/test-server:2.3 + name: test + ports: + - name: http-1 + containerPort: 8080 + protocol: TCP + env: + - name: POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name +--- +apiVersion: v1 +kind: Service +metadata: + annotations: + name: svc-test + labels: + app: fw-use-specified-nb +spec: + type: LoadBalancer + selector: + app: fw-use-specified-nb + ports: + - name: http-1 + protocol: TCP + port: 80 + targetPort: 8080 + sessionAffinity: None diff --git a/e2e/test/lb-created-with-new-nb-id/chainsaw-test.yaml b/e2e/test/lb-created-with-new-nb-id/chainsaw-test.yaml new file mode 100644 index 00000000..df1a0952 --- /dev/null +++ b/e2e/test/lb-created-with-new-nb-id/chainsaw-test.yaml @@ -0,0 +1,121 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + name: lb-created-with-new-nb-id +spec: + namespace: "lb-created-with-new-nb-id" + steps: + - name: Check if CCM is deployed + try: + - assert: + file: ../assert-ccm-resources.yaml + - name: Create nodebalancer and create resources + try: + - script: + outputs: + - name: nbconf + value: (json_parse($stdout)) + content: | + set -e + + re='^[0-9]+$' + LABEL="ccm-$(head /dev/urandom | tr -dc 'a-z0-9' | head -c 5)" + + nbid=$(curl -s --request POST \ + -H "Authorization: Bearer $LINODE_TOKEN" \ + -H "Content-Type: application/json" \ + -H "accept: application/json" \ + "https://api.linode.com/v4/nodebalancers" \ + --data "{\"label\": \"$LABEL\", \"region\": \"$REGION\"}" | jq .id) + + if ! 
[[ $nbid =~ $re ]]; then + echo "Nodebalancer id [$nbid] is incorrect, failed to create nodebalancer" + exit 1 + fi + + echo "{\"id\": \"$nbid\"}" + check: + ($error == null): true + - apply: + file: create-pods-services.yaml + catch: + - describe: + apiVersion: v1 + kind: Pod + - describe: + apiVersion: v1 + kind: Service + - name: Check that loadbalancer ip is assigned + try: + - assert: + resource: + apiVersion: v1 + kind: Service + metadata: + name: svc-test + status: + (loadBalancer.ingress[0].ip != null): true + - name: Validate nodebalancer id + try: + - script: + content: | + set -e + + expectedId=$(kubectl get svc svc-test -n $NAMESPACE -o json | jq -r .metadata.annotations[]) + nbid=$(KUBECONFIG=$KUBECONFIG NAMESPACE=$NAMESPACE LINODE_TOKEN=$LINODE_TOKEN ../scripts/get-nb-id.sh) + + if [[ $nbid == $expectedId ]]; then + echo "Condition met" + fi + check: + ($error == null): true + (contains($stdout, 'Condition met')): true + - name: Create new nodebalancer and update service to use it + try: + - script: + content: | + set -e + + re='^[0-9]+$' + + old_nbid=$(kubectl get svc svc-test -n $NAMESPACE -o json | jq -r .metadata.annotations[]) + + LABEL="ccm-$(head /dev/urandom | tr -dc 'a-z0-9' | head -c 5)" + + nbid=$(curl -s --request POST \ + -H "Authorization: Bearer $LINODE_TOKEN" \ + -H "Content-Type: application/json" \ + -H "accept: application/json" \ + "https://api.linode.com/v4/nodebalancers" \ + --data "{\"label\": \"$LABEL\", \"region\": \"$REGION\"}" | jq .id) + + if ! [[ $nbid =~ $re ]]; then + echo "Nodebalancer id [$nbid] is incorrect, failed to create nodebalancer" + exit 1 + fi + + kubectl annotate --overwrite svc svc-test -n $NAMESPACE service.beta.kubernetes.io/linode-loadbalancer-nodebalancer-id=$nbid + + for i in {1..10}; do + nbid2=$(KUBECONFIG=$KUBECONFIG NAMESPACE=$NAMESPACE LINODE_TOKEN=$LINODE_TOKEN ../scripts/get-nb-id.sh) + + if [[ $nbid == $nbid2 ]]; then + echo "Condition met" + break + fi + sleep 5 + done + + old_nb_resp=$(curl --write-out "%{http_code}\n" \ + --silent --output /dev/null \ + -H "Authorization: Bearer $LINODE_TOKEN" \ + "https://api.linode.com/v4/nodebalancers/$old_nbid") + + if [[ $old_nb_resp == "404" ]]; then + echo "old nodebalancer not found" + fi + check: + ($error == null): true + (contains($stdout, 'Condition met')): true + (contains($stdout, 'old nodebalancer not found')): true diff --git a/e2e/test/lb-created-with-new-nb-id/create-pods-services.yaml b/e2e/test/lb-created-with-new-nb-id/create-pods-services.yaml new file mode 100644 index 00000000..c37615c7 --- /dev/null +++ b/e2e/test/lb-created-with-new-nb-id/create-pods-services.yaml @@ -0,0 +1,49 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: created-with-new-nb-id + name: test +spec: + replicas: 1 + selector: + matchLabels: + app: created-with-new-nb-id + template: + metadata: + labels: + app: created-with-new-nb-id + spec: + containers: + - image: appscode/test-server:2.3 + name: test + ports: + - name: http-1 + containerPort: 8080 + protocol: TCP + env: + - name: POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name +--- +apiVersion: v1 +kind: Service +metadata: + name: svc-test + annotations: + service.beta.kubernetes.io/linode-loadbalancer-nodebalancer-id: ($nbconf.id) + labels: + app: created-with-new-nb-id +spec: + type: LoadBalancer + selector: + app: created-with-new-nb-id + ports: + - name: http-1 + protocol: TCP + port: 80 + targetPort: 8080 + sessionAffinity: None diff --git 
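Several steps above and below shell out to `../scripts/get-nb-id.sh`, which is not part of this diff. A hypothetical sketch of what such a helper might do (the real script may differ): look up the service's ingress IP and match it against the NodeBalancers visible to the token.

```bash
#!/usr/bin/env bash
# Hypothetical sketch of ../scripts/get-nb-id.sh: print the ID of the
# NodeBalancer backing svc-test by matching the service's ingress IP.
# Assumes NAMESPACE and LINODE_TOKEN are exported, as in the tests above.
set -euo pipefail

ip=$(kubectl get svc svc-test -n "$NAMESPACE" \
     -o jsonpath='{.status.loadBalancer.ingress[0].ip}')

# Note: reads only the first page of results; enough for a test account.
curl -s -H "Authorization: Bearer $LINODE_TOKEN" \
     "https://api.linode.com/v4/nodebalancers" \
  | jq -r --arg ip "$ip" '.data[] | select(.ipv4 == $ip) | .id'
```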
a/e2e/test/lb-created-with-specified-nb-id/chainsaw-test.yaml b/e2e/test/lb-created-with-specified-nb-id/chainsaw-test.yaml new file mode 100644 index 00000000..0b77dbe9 --- /dev/null +++ b/e2e/test/lb-created-with-specified-nb-id/chainsaw-test.yaml @@ -0,0 +1,73 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + name: lb-created-with-specified-nb-id +spec: + namespace: "lb-created-with-specified-nb-id" + steps: + - name: Check if CCM is deployed + try: + - assert: + file: ../assert-ccm-resources.yaml + - name: Create nodebalancer and create resources + try: + - script: + outputs: + - name: nbconf + value: (json_parse($stdout)) + content: | + set -e + + re='^[0-9]+$' + LABEL="ccm-$(head /dev/urandom | tr -dc 'a-z0-9' | head -c 5)" + + nbid=$(curl -s --request POST \ + -H "Authorization: Bearer $LINODE_TOKEN" \ + -H "Content-Type: application/json" \ + -H "accept: application/json" \ + "https://api.linode.com/v4/nodebalancers" \ + --data "{\"label\": \"$LABEL\", \"region\": \"$REGION\"}" | jq .id) + + if ! [[ $nbid =~ $re ]]; then + echo "Nodebalancer id [$nbid] is incorrect, failed to create nodebalancer" + exit 1 + fi + + echo "{\"id\": \"$nbid\"}" + check: + ($error == null): true + - apply: + file: create-pods-services.yaml + catch: + - describe: + apiVersion: v1 + kind: Pod + - describe: + apiVersion: v1 + kind: Service + - name: Check that loadbalancer ip is assigned + try: + - assert: + resource: + apiVersion: v1 + kind: Service + metadata: + name: svc-test + status: + (loadBalancer.ingress[0].ip != null): true + - name: Validate nodebalancer id + try: + - script: + content: | + set -e + + expectedId=$(kubectl get svc svc-test -n $NAMESPACE -o json | jq -r .metadata.annotations[]) + nbid=$(KUBECONFIG=$KUBECONFIG NAMESPACE=$NAMESPACE LINODE_TOKEN=$LINODE_TOKEN ../scripts/get-nb-id.sh) + + if [[ $nbid == $expectedId ]]; then + echo "Condition met" + fi + check: + ($error == null): true + (contains($stdout, 'Condition met')): true diff --git a/e2e/test/lb-created-with-specified-nb-id/create-pods-services.yaml b/e2e/test/lb-created-with-specified-nb-id/create-pods-services.yaml new file mode 100644 index 00000000..1d286209 --- /dev/null +++ b/e2e/test/lb-created-with-specified-nb-id/create-pods-services.yaml @@ -0,0 +1,49 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: created-with-specified-nb-id + name: test +spec: + replicas: 1 + selector: + matchLabels: + app: created-with-specified-nb-id + template: + metadata: + labels: + app: created-with-specified-nb-id + spec: + containers: + - image: appscode/test-server:2.3 + name: test + ports: + - name: http-1 + containerPort: 8080 + protocol: TCP + env: + - name: POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name +--- +apiVersion: v1 +kind: Service +metadata: + name: svc-test + annotations: + service.beta.kubernetes.io/linode-loadbalancer-nodebalancer-id: ($nbconf.id) + labels: + app: created-with-specified-nb-id +spec: + type: LoadBalancer + selector: + app: created-with-specified-nb-id + ports: + - name: http-1 + protocol: TCP + port: 80 + targetPort: 8080 + sessionAffinity: None diff --git a/e2e/test/lb-delete-svc-no-nb/chainsaw-test.yaml b/e2e/test/lb-delete-svc-no-nb/chainsaw-test.yaml new file mode 100644 index 00000000..723a5d35 --- /dev/null +++ b/e2e/test/lb-delete-svc-no-nb/chainsaw-test.yaml @@ -0,0 +1,124 @@ +# 
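The "Validate nodebalancer id" steps read the annotation with `jq -r .metadata.annotations[]`, which flattens the values of *every* annotation on the service; that only works while the NodeBalancer ID is the sole annotation present. A sketch of a more defensive lookup keyed by annotation name, using the same annotation key these manifests set:

```bash
# Read the NodeBalancer ID annotation by key rather than by position, so
# extra annotations on the service cannot change what the test compares.
expectedId=$(kubectl get svc svc-test -n "$NAMESPACE" -o json \
  | jq -r '.metadata.annotations["service.beta.kubernetes.io/linode-loadbalancer-nodebalancer-id"]')
```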
yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + name: lb-delete-svc-no-nb +spec: + namespace: "lb-delete-svc-no-nb" + steps: + - name: Check if CCM is deployed + try: + - assert: + file: ../assert-ccm-resources.yaml + - name: Create nodebalancer and create resources + try: + - script: + outputs: + - name: nbconf + value: (json_parse($stdout)) + content: | + set -e + + re='^[0-9]+$' + LABEL="ccm-$(head /dev/urandom | tr -dc 'a-z0-9' | head -c 5)" + + nbid=$(curl -s --request POST \ + -H "Authorization: Bearer $LINODE_TOKEN" \ + -H "Content-Type: application/json" \ + -H "accept: application/json" \ + "https://api.linode.com/v4/nodebalancers" \ + --data "{\"label\": \"$LABEL\", \"region\": \"$REGION\"}" | jq .id) + + if ! [[ $nbid =~ $re ]]; then + echo "Nodebalancer id [$nbid] is incorrect, failed to create nodebalancer" + exit 1 + fi + + echo "{\"id\": \"$nbid\"}" + check: + ($error == null): true + - apply: + file: create-pods-services.yaml + catch: + - describe: + apiVersion: v1 + kind: Pod + - describe: + apiVersion: v1 + kind: Service + - name: Check that loadbalancer ip is assigned + try: + - assert: + resource: + apiVersion: v1 + kind: Service + metadata: + name: svc-test + status: + (loadBalancer.ingress[0].ip != null): true + - name: Validate nodebalancer id + try: + - script: + content: | + set -e + + expectedId=$(kubectl get svc svc-test -n $NAMESPACE -o json | jq -r .metadata.annotations[]) + nbid=$(KUBECONFIG=$KUBECONFIG NAMESPACE=$NAMESPACE LINODE_TOKEN=$LINODE_TOKEN ../scripts/get-nb-id.sh) + + if [[ $nbid == $expectedId ]]; then + echo "Condition met" + fi + check: + ($error == null): true + (contains($stdout, 'Condition met')): true + - name: Delete nodebalancer, delete service and make sure it's deleted + try: + - script: + content: | + set -e + + re='^[0-9]+$' + nbid=$(kubectl get svc svc-test -n $NAMESPACE -o json | jq -r .metadata.annotations[]) + + if !
[[ $nbid =~ $re ]]; then + echo "Nodebalancer id [$nbid] is incorrect" + exit 1 + fi + + # Delete nodebalancer + delete_resp=$(curl --write-out "%{http_code}\n" \ + --silent --output /dev/null \ + -X DELETE \ + -H "Authorization: Bearer $LINODE_TOKEN" \ + "https://api.linode.com/v4/nodebalancers/$nbid") + + if [[ $delete_resp == "200" ]]; then + echo "nodebalancer deleted" + fi + + # Check to make sure nodebalancer is deleted + old_nb_resp=$(curl --write-out "%{http_code}\n" \ + --silent --output /dev/null \ + -H "Authorization: Bearer $LINODE_TOKEN" \ + "https://api.linode.com/v4/nodebalancers/$nbid") + + if [[ $old_nb_resp == "404" ]]; then + echo "old nodebalancer not found" + fi + + # Delete service and make sure it's deleted + kubectl --timeout=60s delete svc svc-test -n $NAMESPACE + + for i in {1..10}; do + if kubectl get svc svc-test -n $NAMESPACE > /dev/null 2>&1; then + sleep 5 + else + echo "service is deleted" + break + fi + done + check: + ($error == null): true + (contains($stdout, 'nodebalancer deleted')): true + (contains($stdout, 'old nodebalancer not found')): true + (contains($stdout, 'service is deleted')): true diff --git a/e2e/test/lb-delete-svc-no-nb/create-pods-services.yaml b/e2e/test/lb-delete-svc-no-nb/create-pods-services.yaml new file mode 100644 index 00000000..55ea60f9 --- /dev/null +++ b/e2e/test/lb-delete-svc-no-nb/create-pods-services.yaml @@ -0,0 +1,49 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: delete-svc-no-nb + name: test +spec: + replicas: 1 + selector: + matchLabels: + app: delete-svc-no-nb + template: + metadata: + labels: + app: delete-svc-no-nb + spec: + containers: + - image: appscode/test-server:2.3 + name: test + ports: + - name: http-1 + containerPort: 8080 + protocol: TCP + env: + - name: POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name +--- +apiVersion: v1 +kind: Service +metadata: + name: svc-test + annotations: + service.beta.kubernetes.io/linode-loadbalancer-nodebalancer-id: ($nbconf.id) + labels: + app: delete-svc-no-nb +spec: + type: LoadBalancer + selector: + app: delete-svc-no-nb + ports: + - name: http-1 + protocol: TCP + port: 80 + targetPort: 8080 + sessionAffinity: None diff --git a/e2e/test/lb-delete-svc-use-new-nbid/chainsaw-test.yaml b/e2e/test/lb-delete-svc-use-new-nbid/chainsaw-test.yaml new file mode 100644 index 00000000..7369d478 --- /dev/null +++ b/e2e/test/lb-delete-svc-use-new-nbid/chainsaw-test.yaml @@ -0,0 +1,121 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + name: lb-delete-svc-use-new-nbid +spec: + namespace: "lb-delete-svc-use-new-nbid" + steps: + - name: Check if CCM is deployed + try: + - assert: + file: ../assert-ccm-resources.yaml + - name: Create nodebalancer and create resources + try: + - script: + outputs: + - name: nbconf + value: (json_parse($stdout)) + content: | + set -e + + re='^[0-9]+$' + LABEL="ccm-$(head /dev/urandom | tr -dc 'a-z0-9' | head -c 5)" + + nbid=$(curl -s --request POST \ + -H "Authorization: Bearer $LINODE_TOKEN" \ + -H "Content-Type: application/json" \ + -H "accept: application/json" \ + "https://api.linode.com/v4/nodebalancers" \ + --data "{\"label\": \"$LABEL\", \"region\": \"$REGION\"}" | jq .id) + + if !
[[ $nbid =~ $re ]]; then + echo "Nodebalancer id [$nbid] is incorrect, failed to create nodebalancer" + exit 1 + fi + + echo "{\"id\": \"$nbid\"}" + check: + ($error == null): true + - apply: + file: create-pods-services.yaml + catch: + - describe: + apiVersion: v1 + kind: Pod + - describe: + apiVersion: v1 + kind: Service + - name: Check that loadbalancer ip is assigned + try: + - assert: + resource: + apiVersion: v1 + kind: Service + metadata: + name: svc-test + status: + (loadBalancer.ingress[0].ip != null): true + - name: Validate nodebalancer id + try: + - script: + content: | + set -e + + expectedId=$(kubectl get svc svc-test -n $NAMESPACE -o json | jq -r .metadata.annotations[]) + nbid=$(KUBECONFIG=$KUBECONFIG NAMESPACE=$NAMESPACE LINODE_TOKEN=$LINODE_TOKEN ../scripts/get-nb-id.sh) + + if [[ $nbid == $expectedId ]]; then + echo "Condition met" + fi + check: + ($error == null): true + (contains($stdout, 'Condition met')): true + - name: Create new nodebalancer and update service to use it + try: + - script: + content: | + set -e + + re='^[0-9]+$' + + old_nbid=$(kubectl get svc svc-test -n $NAMESPACE -o json | jq -r .metadata.annotations[]) + + LABEL="ccm-$(head /dev/urandom | tr -dc 'a-z0-9' | head -c 5)" + + nbid=$(curl -s --request POST \ + -H "Authorization: Bearer $LINODE_TOKEN" \ + -H "Content-Type: application/json" \ + -H "accept: application/json" \ + "https://api.linode.com/v4/nodebalancers" \ + --data "{\"label\": \"$LABEL\", \"region\": \"$REGION\"}" | jq .id) + + if ! [[ $nbid =~ $re ]]; then + echo "Nodebalancer id [$nbid] is incorrect, failed to create nodebalancer" + exit 1 + fi + + kubectl annotate --overwrite svc svc-test -n $NAMESPACE service.beta.kubernetes.io/linode-loadbalancer-nodebalancer-id=$nbid + + for i in {1..10}; do + nbid2=$(KUBECONFIG=$KUBECONFIG NAMESPACE=$NAMESPACE LINODE_TOKEN=$LINODE_TOKEN ../scripts/get-nb-id.sh) + + if [[ $nbid == $nbid2 ]]; then + echo "Condition met" + break + fi + sleep 5 + done + + old_nb_resp=$(curl --write-out "%{http_code}\n" \ + --silent --output /dev/null \ + -H "Authorization: Bearer $LINODE_TOKEN" \ + "https://api.linode.com/v4/nodebalancers/$old_nbid") + + if [[ $old_nb_resp == "404" ]]; then + echo "old nodebalancer not found" + fi + check: + ($error == null): true + (contains($stdout, 'Condition met')): true + (contains($stdout, 'old nodebalancer not found')): true diff --git a/e2e/test/lb-delete-svc-use-new-nbid/create-pods-services.yaml b/e2e/test/lb-delete-svc-use-new-nbid/create-pods-services.yaml new file mode 100644 index 00000000..58815cf6 --- /dev/null +++ b/e2e/test/lb-delete-svc-use-new-nbid/create-pods-services.yaml @@ -0,0 +1,49 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: delete-svc-use-new-nbid + name: test +spec: + replicas: 1 + selector: + matchLabels: + app: delete-svc-use-new-nbid + template: + metadata: + labels: + app: delete-svc-use-new-nbid + spec: + containers: + - image: appscode/test-server:2.3 + name: test + ports: + - name: http-1 + containerPort: 8080 + protocol: TCP + env: + - name: POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name +--- +apiVersion: v1 +kind: Service +metadata: + name: svc-test + annotations: + service.beta.kubernetes.io/linode-loadbalancer-nodebalancer-id: ($nbconf.id) + labels: + app: delete-svc-use-new-nbid +spec: + type: LoadBalancer + selector: + app: delete-svc-use-new-nbid + ports: + - name: http-1 + protocol: TCP + port: 80 + targetPort: 8080 + sessionAffinity: None diff --git 
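The "create a NodeBalancer, validate the numeric ID" preamble recurs verbatim across these suites. If it ever moves into `../scripts`, a consolidated helper might look like this sketch, under the same environment assumptions (`LINODE_TOKEN` and `REGION` exported):

```bash
#!/usr/bin/env bash
# Sketch: create a throwaway NodeBalancer with a random ccm- label and echo
# its numeric ID, failing loudly when the API returns anything else.
set -euo pipefail

create_nb() {
  local label="ccm-$(head /dev/urandom | tr -dc 'a-z0-9' | head -c 5)"
  local id
  id=$(curl -s --request POST \
        -H "Authorization: Bearer $LINODE_TOKEN" \
        -H "Content-Type: application/json" \
        "https://api.linode.com/v4/nodebalancers" \
        --data "{\"label\": \"$label\", \"region\": \"$REGION\"}" | jq .id)
  [[ $id =~ ^[0-9]+$ ]] || { echo "failed to create nodebalancer: $id" >&2; return 1; }
  echo "$id"
}

nbid=$(create_nb)
```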
a/e2e/test/lb-delete-svc-use-specified-nb/chainsaw-test.yaml b/e2e/test/lb-delete-svc-use-specified-nb/chainsaw-test.yaml new file mode 100644 index 00000000..99ceb8e4 --- /dev/null +++ b/e2e/test/lb-delete-svc-use-specified-nb/chainsaw-test.yaml @@ -0,0 +1,73 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + name: lb-delete-svc-use-specified-nb +spec: + namespace: "lb-delete-svc-use-specified-nb" + steps: + - name: Check if CCM is deployed + try: + - assert: + file: ../assert-ccm-resources.yaml + - name: Create nodebalancer and create resources + try: + - script: + outputs: + - name: nbconf + value: (json_parse($stdout)) + content: | + set -e + + re='^[0-9]+$' + LABEL="ccm-$(head /dev/urandom | tr -dc 'a-z0-9' | head -c 5)" + + nbid=$(curl -s --request POST \ + -H "Authorization: Bearer $LINODE_TOKEN" \ + -H "Content-Type: application/json" \ + -H "accept: application/json" \ + "https://api.linode.com/v4/nodebalancers" \ + --data "{\"label\": \"$LABEL\", \"region\": \"$REGION\"}" | jq .id) + + if ! [[ $nbid =~ $re ]]; then + echo "Nodebalancer id [$nbid] is incorrect, failed to create nodebalancer" + exit 1 + fi + + echo "{\"id\": \"$nbid\"}" + check: + ($error == null): true + - apply: + file: create-pods-services.yaml + catch: + - describe: + apiVersion: v1 + kind: Pod + - describe: + apiVersion: v1 + kind: Service + - name: Check that loadbalancer ip is assigned + try: + - assert: + resource: + apiVersion: v1 + kind: Service + metadata: + name: svc-test + status: + (loadBalancer.ingress[0].ip != null): true + - name: Validate nodebalancer id + try: + - script: + content: | + set -e + + expectedId=$(kubectl get svc svc-test -n $NAMESPACE -o json | jq -r .metadata.annotations[]) + nbid=$(KUBECONFIG=$KUBECONFIG NAMESPACE=$NAMESPACE LINODE_TOKEN=$LINODE_TOKEN ../scripts/get-nb-id.sh) + + if [[ $nbid == $expectedId ]]; then + echo "Condition met" + fi + check: + ($error == null): true + (contains($stdout, 'Condition met')): true diff --git a/e2e/test/lb-delete-svc-use-specified-nb/create-pods-services.yaml b/e2e/test/lb-delete-svc-use-specified-nb/create-pods-services.yaml new file mode 100644 index 00000000..87461401 --- /dev/null +++ b/e2e/test/lb-delete-svc-use-specified-nb/create-pods-services.yaml @@ -0,0 +1,49 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: delete-svc-use-specified-nb + name: test +spec: + replicas: 1 + selector: + matchLabels: + app: delete-svc-use-specified-nb + template: + metadata: + labels: + app: delete-svc-use-specified-nb + spec: + containers: + - image: appscode/test-server:2.3 + name: test + ports: + - name: http-1 + containerPort: 8080 + protocol: TCP + env: + - name: POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name +--- +apiVersion: v1 +kind: Service +metadata: + name: svc-test + annotations: + service.beta.kubernetes.io/linode-loadbalancer-nodebalancer-id: ($nbconf.id) + labels: + app: delete-svc-use-specified-nb +spec: + type: LoadBalancer + selector: + app: delete-svc-use-specified-nb + ports: + - name: http-1 + protocol: TCP + port: 80 + targetPort: 8080 + sessionAffinity: None diff --git a/e2e/test/lb-fw-delete-acl/chainsaw-test.yaml b/e2e/test/lb-fw-delete-acl/chainsaw-test.yaml new file mode 100644 index 00000000..6d5bd6ed --- /dev/null +++ b/e2e/test/lb-fw-delete-acl/chainsaw-test.yaml @@ -0,0 +1,172 @@ +# yaml-language-server: 
$schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + name: lb-fw-delete-acl +spec: + namespace: "lb-fw-delete-acl" + steps: + - name: Check if CCM is deployed + try: + - assert: + file: ../assert-ccm-resources.yaml + - name: Create pods and services + try: + - apply: + file: create-pods-services.yaml + catch: + - describe: + apiVersion: v1 + kind: Pod + - describe: + apiVersion: v1 + kind: Service + - name: Check that loadbalancer ip is assigned + try: + - assert: + resource: + apiVersion: v1 + kind: Service + metadata: + name: svc-test + status: + (loadBalancer.ingress[0].ip != null): true + - name: Fetch Nodebalancer ID, make sure it has firewall attached + try: + - script: + content: | + set -e + + for i in {1..10}; do + nbid=$(KUBECONFIG=$KUBECONFIG NAMESPACE=$NAMESPACE LINODE_TOKEN=$LINODE_TOKEN ../scripts/get-nb-id.sh) + + fw=$(curl -s --request GET \ + -H "Authorization: Bearer $LINODE_TOKEN" \ + -H "Content-Type: application/json" \ + -H "accept: application/json" \ + "https://api.linode.com/v4/nodebalancers/${nbid}/firewalls" || true) + + fwCount=$(echo $fw | jq '.data | length') + ips=$(echo $fw | jq '.data[].rules.inbound[].addresses.ipv4[]') + if [[ $fwCount -eq 1 && -n $ips && $ips == *"7.7.7.7/32"* ]]; then + echo "firewall attached and rule has specified ip" + break + fi + sleep 10 + done + check: + ($error == null): true + (contains($stdout, 'firewall attached and rule has specified ip')): true + - name: Delete ACL and check that firewall no longer exists + try: + - script: + content: | + set -e + + for i in {1..10}; do + nbid=$(KUBECONFIG=$KUBECONFIG NAMESPACE=$NAMESPACE LINODE_TOKEN=$LINODE_TOKEN ../scripts/get-nb-id.sh) + + fw=$(curl -s --request GET \ + -H "Authorization: Bearer $LINODE_TOKEN" \ + -H "Content-Type: application/json" \ + -H "accept: application/json" \ + "https://api.linode.com/v4/nodebalancers/${nbid}/firewalls" || true) + + fwid=$(echo $fw | jq -r '.data[].id') + + # Patch service to remove ACL annotation + kubectl patch service svc-test -n $NAMESPACE --type=json -p='[{"op": "remove", "path": "/metadata/annotations/service.beta.kubernetes.io~1linode-loadbalancer-firewall-acl"}]' + sleep 5 + + # Check that firewall is no longer attached to nb + fw=$(curl -s --request GET \ + -H "Authorization: Bearer $LINODE_TOKEN" \ + -H "Content-Type: application/json" \ + -H "accept: application/json" \ + "https://api.linode.com/v4/nodebalancers/${nbid}/firewalls" || true) + + fwCount=$(echo $fw | jq -r '.data | length') + + # Check if firewall is deleted + fwRespCode=$(curl -s -o /dev/null -w "%{http_code}" \ + --request GET \ + -H "Authorization: Bearer $LINODE_TOKEN" \ + -H "accept: application/json" \ + "https://api.linode.com/v4/networking/firewalls/${fwid}" || true) + + if [[ $fwCount -eq 0 && $fwRespCode -eq "404" ]]; then + echo "firewall detached and deleted" + break + fi + sleep 10 + done + check: + ($error == null): true + (contains($stdout, 'firewall detached and deleted')): true + - name: Refresh service by adding the ACL again + try: + - apply: + file: create-pods-services.yaml + catch: + - describe: + apiVersion: v1 + kind: Service + - name: Delete service and make sure nb and fw are deleted automatically + try: + - script: + content: | + set -e + + nbid=0 + fwid=0 + + for i in {1..10}; do + nbid=$(KUBECONFIG=$KUBECONFIG NAMESPACE=$NAMESPACE LINODE_TOKEN=$LINODE_TOKEN ../scripts/get-nb-id.sh) + + fw=$(curl -s --request GET \ + -H
"Authorization: Bearer $LINODE_TOKEN" \ + -H "Content-Type: application/json" \ + -H "accept: application/json" \ + "https://api.linode.com/v4/nodebalancers/${nbid}/firewalls" || true) + + fwid=$(echo $fw | jq -r '.data[].id') + + if [[ $nbid -ne 0 && $fwid -ne 0 ]]; then + break + fi + sleep 2 + done + + if [[ $nbid -eq 0 || $fwid -eq 0 ]]; then + echo "nb or fw not found" + exit 1 + fi + + for i in {1..10}; do + # Remove service + kubectl delete service svc-test -n $NAMESPACE --ignore-not-found + sleep 5 + + # Check if nodebalancer is deleted + nbRespCode=$(curl -s -o /dev/null -w "%{http_code}" \ + --request GET \ + -H "Authorization: Bearer $LINODE_TOKEN" \ + -H "accept: application/json" \ + "https://api.linode.com/v4/nodebalancers/${nbid}" || true) + + # Check if firewall is deleted + fwRespCode=$(curl -s -o /dev/null -w "%{http_code}" \ + --request GET \ + -H "Authorization: Bearer $LINODE_TOKEN" \ + -H "accept: application/json" \ + "https://api.linode.com/v4/networking/firewalls/${fwid}" || true) + + if [[ $nbRespCode == "404" && $fwRespCode == "404" ]]; then + echo "nb and fw deleted" + break + fi + sleep 10 + done + check: + ($error == null): true + (contains($stdout, 'nb and fw deleted')): true diff --git a/e2e/test/lb-fw-delete-acl/create-pods-services.yaml b/e2e/test/lb-fw-delete-acl/create-pods-services.yaml new file mode 100644 index 00000000..2d020fdf --- /dev/null +++ b/e2e/test/lb-fw-delete-acl/create-pods-services.yaml @@ -0,0 +1,68 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: lb-fw-delete-acl + name: test +spec: + replicas: 2 + selector: + matchLabels: + app: lb-fw-delete-acl + template: + metadata: + labels: + app: lb-fw-delete-acl + spec: + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - podAffinityTerm: + labelSelector: + matchExpressions: + - key: app + operator: In + values: + - simple-lb + topologyKey: kubernetes.io/hostname + weight: 100 + containers: + - image: appscode/test-server:2.3 + name: test + ports: + - name: http-1 + containerPort: 8080 + protocol: TCP + env: + - name: POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name +--- +apiVersion: v1 +kind: Service +metadata: + name: svc-test + annotations: + service.beta.kubernetes.io/linode-loadbalancer-firewall-acl: | + { + "denyList": { + "ipv4": ["8.8.8.8/32", + "9.9.9.9/32", + "7.7.7.7/32"] + } + } + labels: + app: lb-fw-delete-acl +spec: + type: LoadBalancer + selector: + app: lb-fw-delete-acl + ports: + - name: http-1 + protocol: TCP + port: 80 + targetPort: 8080 + sessionAffinity: None diff --git a/e2e/test/lb-fw-update-acl/chainsaw-test.yaml b/e2e/test/lb-fw-update-acl/chainsaw-test.yaml new file mode 100644 index 00000000..15b05807 --- /dev/null +++ b/e2e/test/lb-fw-update-acl/chainsaw-test.yaml @@ -0,0 +1,93 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + name: lb-fw-update-acl +spec: + namespace: "lb-fw-update-acl" + steps: + - name: Check if CCM is deployed + try: + - assert: + file: ../assert-ccm-resources.yaml + - name: Create pods and services + try: + - apply: + file: create-pods-services.yaml + catch: + - describe: + apiVersion: v1 + kind: Pod + - describe: + apiVersion: v1 + kind: Service + - name: Check that loadbalancer ip is assigned + try: + - assert: + resource: + apiVersion: v1 + kind: Service + metadata: + name: svc-test + status: + 
(loadBalancer.ingress[0].ip != null): true + - name: Fetch Nodebalancer ID, make sure it has firewall attached + try: + - script: + content: | + set -e + + for i in {1..10}; do + nbid=$(KUBECONFIG=$KUBECONFIG NAMESPACE=$NAMESPACE LINODE_TOKEN=$LINODE_TOKEN ../scripts/get-nb-id.sh) + + fw=$(curl -s --request GET \ + -H "Authorization: Bearer $LINODE_TOKEN" \ + -H "Content-Type: application/json" \ + -H "accept: application/json" \ + "https://api.linode.com/v4/nodebalancers/${nbid}/firewalls" || true) + + fwCount=$(echo $fw | jq '.data | length') + ips=$(echo $fw | jq '.data[].rules.inbound[].addresses.ipv4[]') + if [[ $fwCount -eq 1 && -n $ips && $ips == *"7.7.7.7/32"* ]]; then + echo "firewall attached and rule has specified ip" + break + fi + sleep 10 + done + check: + ($error == null): true + (contains($stdout, 'firewall attached and rule has specified ip')): true + - name: Update service with new ACL + try: + - apply: + file: update-service.yaml + catch: + - describe: + apiVersion: v1 + kind: Service + - name: Fetch firewall ID and check rules are updated + try: + - script: + content: | + set -e + + for i in {1..10}; do + nbid=$(KUBECONFIG=$KUBECONFIG NAMESPACE=$NAMESPACE LINODE_TOKEN=$LINODE_TOKEN ../scripts/get-nb-id.sh) + + fw=$(curl -s --request GET \ + -H "Authorization: Bearer $LINODE_TOKEN" \ + -H "Content-Type: application/json" \ + -H "accept: application/json" \ + "https://api.linode.com/v4/nodebalancers/${nbid}/firewalls" || true) + + fwCount=$(echo $fw | jq -r '.data | length') + ips=$(echo $fw | jq -r '.data[].rules.inbound[].addresses.ipv4[]') + if [[ $fwCount -eq 1 && -n $ips && ! $ips == *"7.7.7.7/32"* ]]; then + echo "firewall attached and rule updated" + break + fi + sleep 10 + done + check: + ($error == null): true + (contains($stdout, 'firewall attached and rule updated')): true diff --git a/e2e/test/lb-fw-update-acl/create-pods-services.yaml b/e2e/test/lb-fw-update-acl/create-pods-services.yaml new file mode 100644 index 00000000..ddcab7f6 --- /dev/null +++ b/e2e/test/lb-fw-update-acl/create-pods-services.yaml @@ -0,0 +1,68 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: lb-fw-update-acl + name: test +spec: + replicas: 2 + selector: + matchLabels: + app: lb-fw-update-acl + template: + metadata: + labels: + app: lb-fw-update-acl + spec: + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - podAffinityTerm: + labelSelector: + matchExpressions: + - key: app + operator: In + values: + - lb-fw-update-acl + topologyKey: kubernetes.io/hostname + weight: 100 + containers: + - image: appscode/test-server:2.3 + name: test + ports: + - name: http-1 + containerPort: 8080 + protocol: TCP + env: + - name: POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name +--- +apiVersion: v1 +kind: Service +metadata: + name: svc-test + annotations: + service.beta.kubernetes.io/linode-loadbalancer-firewall-acl: | + { + "denyList": { + "ipv4": ["8.8.8.8/32", + "9.9.9.9/32", + "7.7.7.7/32"] + } + } + labels: + app: lb-fw-update-acl +spec: + type: LoadBalancer + selector: + app: lb-fw-update-acl + ports: + - name: http-1 + protocol: TCP + port: 80 + targetPort: 8080 + sessionAffinity: None diff --git a/e2e/test/lb-fw-update-acl/update-service.yaml b/e2e/test/lb-fw-update-acl/update-service.yaml new file mode 100644 index 00000000..f05597e0 --- /dev/null +++ b/e2e/test/lb-fw-update-acl/update-service.yaml @@ -0,0 +1,25 @@ +--- +apiVersion: v1 +kind: Service +metadata: + name: svc-test + annotations: +
service.beta.kubernetes.io/linode-loadbalancer-firewall-acl: | + { + "denyList": { + "ipv4": ["8.8.8.8/32", + "9.9.9.9/32"] + } + } + labels: + app: lb-fw-update-acl +spec: + type: LoadBalancer + selector: + app: lb-fw-update-acl + ports: + - name: http-1 + protocol: TCP + port: 80 + targetPort: 8080 + sessionAffinity: None diff --git a/e2e/test/lb-hostname-only-ingress/chainsaw-test.yaml b/e2e/test/lb-hostname-only-ingress/chainsaw-test.yaml new file mode 100644 index 00000000..69c7cd0e --- /dev/null +++ b/e2e/test/lb-hostname-only-ingress/chainsaw-test.yaml @@ -0,0 +1,64 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + name: lb-hostname-only-ingress +spec: + namespace: "lb-hostname-only-ingress" + steps: + - name: Check if CCM is deployed + try: + - assert: + file: ../assert-ccm-resources.yaml + - name: Create pods and services + try: + - apply: + file: create-pods-services.yaml + catch: + - describe: + apiVersion: v1 + kind: Pod + - describe: + apiVersion: v1 + kind: Service + - name: Check that svc-test-1 loadbalancer ingress contains only hostname + try: + - assert: + resource: + apiVersion: v1 + kind: Service + metadata: + name: svc-test-1 + status: + (loadBalancer.ingress[0].ip != null): false + (loadBalancer.ingress[0].hostname != null): true + - name: Check that svc-test-2 loadbalancer ingress contains ip + try: + - assert: + resource: + apiVersion: v1 + kind: Service + metadata: + name: svc-test-2 + status: + (loadBalancer.ingress[0].ip != null): true + (loadBalancer.ingress[0].hostname != null): true + - name: Annotate service + try: + - script: + content: | + set -e + kubectl annotate svc svc-test-2 -n $NAMESPACE service.beta.kubernetes.io/linode-loadbalancer-hostname-only-ingress=true + check: + ($error == null): true + - name: Check and make sure svc-test-2 ingress only contains hostname + try: + - assert: + resource: + apiVersion: v1 + kind: Service + metadata: + name: svc-test-2 + status: + (loadBalancer.ingress[0].ip != null): false + (loadBalancer.ingress[0].hostname != null): true diff --git a/e2e/test/lb-hostname-only-ingress/create-pods-services.yaml b/e2e/test/lb-hostname-only-ingress/create-pods-services.yaml new file mode 100644 index 00000000..59d52fe6 --- /dev/null +++ b/e2e/test/lb-hostname-only-ingress/create-pods-services.yaml @@ -0,0 +1,66 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: hostname-ingress + name: test +spec: + replicas: 1 + selector: + matchLabels: + app: hostname-ingress + template: + metadata: + labels: + app: hostname-ingress + spec: + containers: + - image: appscode/test-server:2.3 + name: test + ports: + - name: http-1 + containerPort: 80 + protocol: TCP + env: + - name: POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name +--- +apiVersion: v1 +kind: Service +metadata: + name: svc-test-1 + annotations: + service.beta.kubernetes.io/linode-loadbalancer-hostname-only-ingress: "true" + labels: + app: hostname-ingress +spec: + type: LoadBalancer + selector: + app: hostname-ingress + ports: + - name: http + protocol: TCP + port: 80 + targetPort: 80 + sessionAffinity: None +--- +apiVersion: v1 +kind: Service +metadata: + name: svc-test-2 + labels: + app: hostname-ingress +spec: + type: LoadBalancer + selector: + app: hostname-ingress + ports: + - name: http + protocol: TCP + port: 80 + targetPort: 80 + sessionAffinity: None diff --git 
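The hostname-only-ingress suite above asserts the same pair of conditions twice: hostname present, IP absent. Outside chainsaw, the two assertions can be spot-checked with jsonpath; a sketch assuming kubectl access to the test namespace:

```bash
# Spot-check the hostname-only-ingress behaviour: once the annotation is
# "true", the ingress entry should carry a hostname and no IP.
host=$(kubectl get svc svc-test-1 -n "$NAMESPACE" \
       -o jsonpath='{.status.loadBalancer.ingress[0].hostname}')
ip=$(kubectl get svc svc-test-1 -n "$NAMESPACE" \
     -o jsonpath='{.status.loadBalancer.ingress[0].ip}')

[[ -n $host && -z $ip ]] && echo "hostname-only ingress confirmed"
```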
a/e2e/test/lb-http-body-health-check/chainsaw-test.yaml b/e2e/test/lb-http-body-health-check/chainsaw-test.yaml new file mode 100644 index 00000000..b6246b55 --- /dev/null +++ b/e2e/test/lb-http-body-health-check/chainsaw-test.yaml @@ -0,0 +1,66 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + name: lb-http-body-health-check +spec: + namespace: "lb-http-body-health-check" + steps: + - name: Check if CCM is deployed + try: + - assert: + file: ../assert-ccm-resources.yaml + - name: Create pods and services + try: + - apply: + file: create-pods-services.yaml + catch: + - describe: + apiVersion: v1 + kind: Pod + - describe: + apiVersion: v1 + kind: Service + - name: Check that loadbalancer ip is assigned + try: + - assert: + resource: + apiVersion: v1 + kind: Service + metadata: + name: svc-test + status: + (loadBalancer.ingress[0].ip != null): true + - name: Fetch nodebalancer config for port 80 + try: + - script: + content: | + set -e + + nbid=$(KUBECONFIG=$KUBECONFIG NAMESPACE=$NAMESPACE LINODE_TOKEN=$LINODE_TOKEN ../scripts/get-nb-id.sh) + + for i in {1..10}; do + nbconfig=$(curl -s \ + -H "Authorization: Bearer $LINODE_TOKEN" \ + -H "Content-Type: application/json" \ + "https://api.linode.com/v4/nodebalancers/$nbid/configs" | jq '.data[] | select(.port == 80)' || true) + + if [[ -z $nbconfig ]]; then + echo "Failed fetching nodebalancer config for port 80" + fi + + port_80_check=$(echo $nbconfig | jq '.check == "http_body"') + port_80_path=$(echo $nbconfig | jq '.check_path == "/"') + port_80_body=$(echo $nbconfig | jq '.check_body == "nginx"') + port_80_protocol=$(echo $nbconfig | jq '.protocol == "http"') + port_80_up_nodes=$(echo $nbconfig | jq '(.nodes_status.up)|tonumber >= 2') + + if [[ $port_80_check == "true" && $port_80_path == "true" && $port_80_body == "true" && $port_80_protocol == "true" && $port_80_up_nodes == "true" ]]; then + echo "All conditions met" + break + fi + sleep 15 + done + check: + ($error == null): true + (contains($stdout, 'All conditions met')): true diff --git a/e2e/test/lb-http-body-health-check/create-pods-services.yaml b/e2e/test/lb-http-body-health-check/create-pods-services.yaml new file mode 100644 index 00000000..1e93bd31 --- /dev/null +++ b/e2e/test/lb-http-body-health-check/create-pods-services.yaml @@ -0,0 +1,52 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: http-body-health-check + name: test +spec: + replicas: 1 + selector: + matchLabels: + app: http-body-health-check + template: + metadata: + labels: + app: http-body-health-check + spec: + containers: + - image: nginx + name: test + ports: + - name: http + containerPort: 80 + protocol: TCP + env: + - name: POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name +--- +apiVersion: v1 +kind: Service +metadata: + annotations: + service.beta.kubernetes.io/linode-loadbalancer-check-body: nginx + service.beta.kubernetes.io/linode-loadbalancer-check-path: / + service.beta.kubernetes.io/linode-loadbalancer-check-type: http_body + service.beta.kubernetes.io/linode-loadbalancer-default-protocol: http + name: svc-test + labels: + app: http-body-health-check +spec: + type: LoadBalancer + selector: + app: http-body-health-check + ports: + - name: http + protocol: TCP + port: 80 + targetPort: 80 + sessionAffinity: None diff --git a/e2e/test/lb-http-status-health-check/chainsaw-test.yaml 
b/e2e/test/lb-http-status-health-check/chainsaw-test.yaml new file mode 100644 index 00000000..16f5b728 --- /dev/null +++ b/e2e/test/lb-http-status-health-check/chainsaw-test.yaml @@ -0,0 +1,65 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + name: lb-http-status-health-check +spec: + namespace: "lb-http-status-health-check" + steps: + - name: Check if CCM is deployed + try: + - assert: + file: ../assert-ccm-resources.yaml + - name: Create pods and services + try: + - apply: + file: create-pods-services.yaml + catch: + - describe: + apiVersion: v1 + kind: Pod + - describe: + apiVersion: v1 + kind: Service + - name: Check that loadbalancer ip is assigned + try: + - assert: + resource: + apiVersion: v1 + kind: Service + metadata: + name: svc-test + status: + (loadBalancer.ingress[0].ip != null): true + - name: Fetch nodebalancer config for port 80 + try: + - script: + content: | + set -e + + nbid=$(KUBECONFIG=$KUBECONFIG NAMESPACE=$NAMESPACE LINODE_TOKEN=$LINODE_TOKEN ../scripts/get-nb-id.sh) + + for i in {1..10}; do + nbconfig=$(curl -s \ + -H "Authorization: Bearer $LINODE_TOKEN" \ + -H "Content-Type: application/json" \ + "https://api.linode.com/v4/nodebalancers/$nbid/configs" | jq '.data[] | select(.port == 80)' || true) + + if [[ -z $nbconfig ]]; then + echo "Failed fetching nodebalancer config for port 80" + fi + + port_80_check=$(echo $nbconfig | jq '.check == "http"') + port_80_path=$(echo $nbconfig | jq '.check_path == "/"') + port_80_protocol=$(echo $nbconfig | jq '.protocol == "http"') + port_80_up_nodes=$(echo $nbconfig | jq '(.nodes_status.up)|tonumber >= 2') + + if [[ $port_80_check == "true" && $port_80_path == "true" && $port_80_protocol == "true" && $port_80_up_nodes == "true" ]]; then + echo "All conditions met" + break + fi + sleep 15 + done + check: + ($error == null): true + (contains($stdout, 'All conditions met')): true diff --git a/e2e/test/lb-http-status-health-check/create-pods-services.yaml b/e2e/test/lb-http-status-health-check/create-pods-services.yaml new file mode 100644 index 00000000..ab76db96 --- /dev/null +++ b/e2e/test/lb-http-status-health-check/create-pods-services.yaml @@ -0,0 +1,51 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: http-status-health-check + name: test +spec: + replicas: 1 + selector: + matchLabels: + app: http-status-health-check + template: + metadata: + labels: + app: http-status-health-check + spec: + containers: + - image: nginx + name: test + ports: + - name: http + containerPort: 80 + protocol: TCP + env: + - name: POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name +--- +apiVersion: v1 +kind: Service +metadata: + annotations: + service.beta.kubernetes.io/linode-loadbalancer-check-path: "/" + service.beta.kubernetes.io/linode-loadbalancer-check-type: "http" + service.beta.kubernetes.io/linode-loadbalancer-default-protocol: "http" + name: svc-test + labels: + app: http-status-health-check +spec: + type: LoadBalancer + selector: + app: http-status-health-check + ports: + - name: http + protocol: TCP + port: 80 + targetPort: 80 + sessionAffinity: None diff --git a/e2e/test/lb-passive-health-check/chainsaw-test.yaml b/e2e/test/lb-passive-health-check/chainsaw-test.yaml new file mode 100644 index 00000000..d7479d88 --- /dev/null +++ b/e2e/test/lb-passive-health-check/chainsaw-test.yaml @@ -0,0 +1,64 @@ +# yaml-language-server: 
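The health-check suites compare one jq expression per config field. jq can evaluate the whole predicate at once and signal the result through its exit code with `-e`, which shortens the retry loop; a sketch reusing the `nbconfig` variable fetched in the scripts above:

```bash
# Assert the port-80 NodeBalancer config in one jq predicate; `jq -e` exits
# non-zero when the expression is false or null, so the if works directly.
if echo "$nbconfig" | jq -e '
     .check == "http" and
     .check_path == "/" and
     .protocol == "http" and
     (.nodes_status.up | tonumber) >= 2' > /dev/null; then
  echo "All conditions met"
fi
```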
$schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + name: lb-passive-health-check +spec: + namespace: "lb-passive-health-check" + steps: + - name: Check if CCM is deployed + try: + - assert: + file: ../assert-ccm-resources.yaml + - name: Create pods and services + try: + - apply: + file: create-pods-services.yaml + catch: + - describe: + apiVersion: v1 + kind: Pod + - describe: + apiVersion: v1 + kind: Service + - name: Check that loadbalancer ip is assigned + try: + - assert: + resource: + apiVersion: v1 + kind: Service + metadata: + name: svc-test + status: + (loadBalancer.ingress[0].ip != null): true + - name: Fetch nodebalancer config for port 80 + try: + - script: + content: | + set -e + + nbid=$(KUBECONFIG=$KUBECONFIG NAMESPACE=$NAMESPACE LINODE_TOKEN=$LINODE_TOKEN ../scripts/get-nb-id.sh) + + for i in {1..10}; do + nbconfig=$(curl -s \ + -H "Authorization: Bearer $LINODE_TOKEN" \ + -H "Content-Type: application/json" \ + "https://api.linode.com/v4/nodebalancers/$nbid/configs" | jq '.data[] | select(.port == 80)' || true) + + if [[ -z $nbconfig ]]; then + echo "Failed fetching nodebalancer config for port 80" + fi + + port_80_check=$(echo $nbconfig | jq '.check == "none"') + port_80_passive=$(echo $nbconfig | jq '.check_passive == true') + port_80_up_nodes=$(echo $nbconfig | jq '(.nodes_status.up)|tonumber >= 2') + + if [[ $port_80_check == "true" && $port_80_passive == "true" && $port_80_up_nodes == "true" ]]; then + echo "All conditions met" + break + fi + sleep 15 + done + check: + ($error == null): true + (contains($stdout, 'All conditions met')): true diff --git a/e2e/test/lb-passive-health-check/create-pods-services.yaml b/e2e/test/lb-passive-health-check/create-pods-services.yaml new file mode 100644 index 00000000..daf4f6fd --- /dev/null +++ b/e2e/test/lb-passive-health-check/create-pods-services.yaml @@ -0,0 +1,50 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: passive-health-check + name: test +spec: + replicas: 1 + selector: + matchLabels: + app: passive-health-check + template: + metadata: + labels: + app: passive-health-check + spec: + containers: + - image: nginx + name: test + ports: + - name: http + containerPort: 80 + protocol: TCP + env: + - name: POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name +--- +apiVersion: v1 +kind: Service +metadata: + annotations: + service.beta.kubernetes.io/linode-loadbalancer-check-passive: "true" + service.beta.kubernetes.io/linode-loadbalancer-check-type: none + name: svc-test + labels: + app: passive-health-check +spec: + type: LoadBalancer + selector: + app: passive-health-check + ports: + - name: http + protocol: TCP + port: 80 + targetPort: 80 + sessionAffinity: None diff --git a/e2e/test/lb-preserve-annotation-new-nb-specified/chainsaw-test.yaml b/e2e/test/lb-preserve-annotation-new-nb-specified/chainsaw-test.yaml new file mode 100644 index 00000000..d7f2661d --- /dev/null +++ b/e2e/test/lb-preserve-annotation-new-nb-specified/chainsaw-test.yaml @@ -0,0 +1,106 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + name: lb-preserve-annotation-new-nb-specified +spec: + namespace: "lb-preserve-annotation-new-nb-specified" + steps: + - name: Check if CCM is deployed + try: + - assert: + file: 
../assert-ccm-resources.yaml + - name: Create resources + try: + - apply: + file: create-pods-services.yaml + catch: + - describe: + apiVersion: v1 + kind: Pod + - describe: + apiVersion: v1 + kind: Service + - name: Check that loadbalancer ip is assigned + try: + - assert: + resource: + apiVersion: v1 + kind: Service + metadata: + name: svc-test + status: + (loadBalancer.ingress[0].ip != null): true + - name: Create new nodebalancer and update service to use it + try: + - script: + content: | + set -e + + re='^[0-9]+$' + + # Get existing nodebalancer id + old_nbid=$(KUBECONFIG=$KUBECONFIG NAMESPACE=$NAMESPACE LINODE_TOKEN=$LINODE_TOKEN ../scripts/get-nb-id.sh) + + # Create new nodebalancer and use it + LABEL="ccm-$(head /dev/urandom | tr -dc 'a-z0-9' | head -c 5)" + + nbid=$(curl -s --request POST \ + -H "Authorization: Bearer $LINODE_TOKEN" \ + -H "Content-Type: application/json" \ + -H "accept: application/json" \ + "https://api.linode.com/v4/nodebalancers" \ + --data "{\"label\": \"$LABEL\", \"region\": \"$REGION\"}" | jq .id) + + if ! [[ $nbid =~ $re ]]; then + echo "Nodebalancer id [$nbid] is incorrect, failed to create nodebalancer" + exit 1 + fi + + kubectl annotate svc svc-test -n $NAMESPACE service.beta.kubernetes.io/linode-loadbalancer-nodebalancer-id=$nbid + + for i in {1..10}; do + nbid2=$(KUBECONFIG=$KUBECONFIG NAMESPACE=$NAMESPACE LINODE_TOKEN=$LINODE_TOKEN ../scripts/get-nb-id.sh) + + if [[ $nbid == $nbid2 ]]; then + echo "updated nodebalancer used" + break + fi + sleep 5 + done + + # Check old nodebalancer still exists + old_nb_resp=$(curl --write-out "%{http_code}\n" \ + --silent --output /dev/null \ + -H "Authorization: Bearer $LINODE_TOKEN" \ + "https://api.linode.com/v4/nodebalancers/$old_nbid") + + if [[ $old_nb_resp == "200" ]]; then + echo "old nodebalancer found" + fi + + # cleanup old nodebalancer + delete_resp=$(curl --write-out "%{http_code}\n" \ + --silent --output /dev/null \ + -X DELETE \ + -H "Authorization: Bearer $LINODE_TOKEN" \ + "https://api.linode.com/v4/nodebalancers/$old_nbid") + + if [[ $delete_resp != "200" ]]; then + echo "failed deleting nodebalancer" + fi + + # cleanup new nodebalancer + delete_resp=$(curl --write-out "%{http_code}\n" \ + --silent --output /dev/null \ + -X DELETE \ + -H "Authorization: Bearer $LINODE_TOKEN" \ + "https://api.linode.com/v4/nodebalancers/$nbid") + + if [[ $delete_resp != "200" ]]; then + echo "failed deleting nodebalancer" + fi + check: + ($error == null): true + (contains($stdout, 'updated nodebalancer used')): true + (contains($stdout, 'old nodebalancer found')): true diff --git a/e2e/test/lb-preserve-annotation-new-nb-specified/create-pods-services.yaml b/e2e/test/lb-preserve-annotation-new-nb-specified/create-pods-services.yaml new file mode 100644 index 00000000..f0b9bc1c --- /dev/null +++ b/e2e/test/lb-preserve-annotation-new-nb-specified/create-pods-services.yaml @@ -0,0 +1,49 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: preserve-annotation-new-nb-specified + name: test +spec: + replicas: 1 + selector: + matchLabels: + app: preserve-annotation-new-nb-specified + template: + metadata: + labels: + app: preserve-annotation-new-nb-specified + spec: + containers: + - image: appscode/test-server:2.3 + name: test + ports: + - name: http-1 + containerPort: 8080 + protocol: TCP + env: + - name: POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name +--- +apiVersion: v1 +kind: Service +metadata: + name: svc-test + annotations: + 
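The preserve flag exercised by these two suites is just an annotation on the service. Assuming the CCM honours the annotation's value at deletion time, flipping it off before deleting the service lets the NodeBalancer be reaped as usual instead of requiring the manual curl cleanup used above; a sketch:

```bash
# Turn preservation off, then delete the service so the CCM deletes the
# NodeBalancer along with it (assumes the CCM re-reads the annotation).
kubectl annotate --overwrite svc svc-test -n "$NAMESPACE" \
  service.beta.kubernetes.io/linode-loadbalancer-preserve=false
kubectl delete svc svc-test -n "$NAMESPACE" --timeout=60s
```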
service.beta.kubernetes.io/linode-loadbalancer-preserve: "true" + labels: + app: preserve-annotation-new-nb-specified +spec: + type: LoadBalancer + selector: + app: preserve-annotation-new-nb-specified + ports: + - name: http-1 + protocol: TCP + port: 80 + targetPort: 8080 + sessionAffinity: None diff --git a/e2e/test/lb-preserve-annotation-svc-delete/chainsaw-test.yaml b/e2e/test/lb-preserve-annotation-svc-delete/chainsaw-test.yaml new file mode 100644 index 00000000..2e33d401 --- /dev/null +++ b/e2e/test/lb-preserve-annotation-svc-delete/chainsaw-test.yaml @@ -0,0 +1,68 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + name: lb-preserve-annotation-svc-delete +spec: + namespace: "lb-preserve-annotation-svc-delete" + steps: + - name: Check if CCM is deployed + try: + - assert: + file: ../assert-ccm-resources.yaml + - name: Create resources + try: + - apply: + file: create-pods-services.yaml + catch: + - describe: + apiVersion: v1 + kind: Pod + - describe: + apiVersion: v1 + kind: Service + - name: Check that loadbalancer ip is assigned + try: + - assert: + resource: + apiVersion: v1 + kind: Service + metadata: + name: svc-test + status: + (loadBalancer.ingress[0].ip != null): true + - name: Delete pods, delete service and validate nb still exists + try: + - script: + content: | + set -e + + nbid=$(KUBECONFIG=$KUBECONFIG NAMESPACE=$NAMESPACE LINODE_TOKEN=$LINODE_TOKEN ../scripts/get-nb-id.sh) + + kubectl --timeout=60s -n $NAMESPACE delete deploy test + kubectl --timeout=60s -n $NAMESPACE delete svc svc-test + sleep 20 + + get_resp=$(curl --write-out "%{http_code}\n" \ + --silent --output /dev/null \ + -X GET \ + -H "Authorization: Bearer $LINODE_TOKEN" \ + "https://api.linode.com/v4/nodebalancers/$nbid") + + if [[ $get_resp == "200" ]]; then + echo "nodebalancer exists" + fi + + # cleanup remaining nodebalancer + delete_resp=$(curl --write-out "%{http_code}\n" \ + --silent --output /dev/null \ + -X DELETE \ + -H "Authorization: Bearer $LINODE_TOKEN" \ + "https://api.linode.com/v4/nodebalancers/$nbid") + + if ! 
[[ $delete_resp == "200" ]]; then + echo "failed deleting nodebalancer" + fi + check: + ($error == null): true + (contains($stdout, 'nodebalancer exists')): true diff --git a/e2e/test/lb-preserve-annotation-svc-delete/create-pods-services.yaml b/e2e/test/lb-preserve-annotation-svc-delete/create-pods-services.yaml new file mode 100644 index 00000000..3888da4a --- /dev/null +++ b/e2e/test/lb-preserve-annotation-svc-delete/create-pods-services.yaml @@ -0,0 +1,49 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: preserve-annotation-svc-delete + name: test +spec: + replicas: 1 + selector: + matchLabels: + app: preserve-annotation-svc-delete + template: + metadata: + labels: + app: preserve-annotation-svc-delete + spec: + containers: + - image: appscode/test-server:2.3 + name: test + ports: + - name: http-1 + containerPort: 8080 + protocol: TCP + env: + - name: POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name +--- +apiVersion: v1 +kind: Service +metadata: + name: svc-test + annotations: + service.beta.kubernetes.io/linode-loadbalancer-preserve: "true" + labels: + app: preserve-annotation-svc-delete +spec: + type: LoadBalancer + selector: + app: preserve-annotation-svc-delete + ports: + - name: http-1 + protocol: TCP + port: 80 + targetPort: 8080 + sessionAffinity: None diff --git a/e2e/test/lb-simple/chainsaw-test.yaml b/e2e/test/lb-simple/chainsaw-test.yaml new file mode 100644 index 00000000..2661961a --- /dev/null +++ b/e2e/test/lb-simple/chainsaw-test.yaml @@ -0,0 +1,84 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + name: lb-simple +spec: + namespace: "lb-simple" + steps: + - name: Check if CCM is deployed + try: + - assert: + file: ../assert-ccm-resources.yaml + - name: Create pods and services + try: + - apply: + file: create-pods-services.yaml + catch: + - describe: + apiVersion: v1 + kind: Pod + - describe: + apiVersion: v1 + kind: Service + - name: Check that loadbalancer ip is assigned + try: + - assert: + resource: + apiVersion: v1 + kind: Service + metadata: + name: svc-test + status: + (loadBalancer.ingress[0].ip != null): true + - name: Fetch loadbalancer ip and check both pods reachable + try: + - script: + content: | + set -e + IP=$(kubectl get svc svc-test -n $NAMESPACE -o json | jq -r .status.loadBalancer.ingress[0].ip) + + podnames=() + + for i in {1..10}; do + if [[ ${#podnames[@]} -lt 2 ]]; then + output=$(curl -s $IP:80 | jq -e .podName || true) + + if [[ "$output" == *"test-"* ]]; then + unique=true + for i in "${array[@]}"; do + if [[ "$i" == "$output" ]]; then + unique=false + break + fi + done + if [[ "$unique" == true ]]; then + podnames+=($output) + fi + fi + else + break + fi + sleep 10 + done + + if [[ ${#podnames[@]} -lt 2 ]]; then + echo "all pods failed to respond" + else + echo "all pods responded" + fi + check: + ($error == null): true + (contains($stdout, 'all pods responded')): true + - name: Delete Pods + try: + - delete: + ref: + apiVersion: v1 + kind: Pod + - name: Delete Service + try: + - delete: + ref: + apiVersion: v1 + kind: Service diff --git a/e2e/test/lb-simple/create-pods-services.yaml b/e2e/test/lb-simple/create-pods-services.yaml new file mode 100644 index 00000000..0f503d9a --- /dev/null +++ b/e2e/test/lb-simple/create-pods-services.yaml @@ -0,0 +1,59 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: lb-simple + 
name: test +spec: + replicas: 2 + selector: + matchLabels: + app: lb-simple + template: + metadata: + labels: + app: lb-simple + spec: + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - podAffinityTerm: + labelSelector: + matchExpressions: + - key: app + operator: In + values: + - lb-simple + topologyKey: kubernetes.io/hostname + weight: 100 + containers: + - image: appscode/test-server:2.3 + name: test + ports: + - name: http-1 + containerPort: 8080 + protocol: TCP + env: + - name: POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name +--- +apiVersion: v1 +kind: Service +metadata: + name: svc-test + labels: + app: lb-simple +spec: + type: LoadBalancer + selector: + app: lb-simple + ports: + - name: http-1 + protocol: TCP + port: 80 + targetPort: 8080 + sessionAffinity: None diff --git a/e2e/test/lb-single-tls/chainsaw-test.yaml b/e2e/test/lb-single-tls/chainsaw-test.yaml new file mode 100644 index 00000000..a75e4964 --- /dev/null +++ b/e2e/test/lb-single-tls/chainsaw-test.yaml @@ -0,0 +1,92 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + name: lb-single-tls +spec: + namespace: "lb-single-tls" + steps: + - name: Check if CCM is deployed + try: + - assert: + file: ../assert-ccm-resources.yaml + - name: Create secret + try: + - script: + content: | + set -e + kubectl -n $NAMESPACE create secret tls tls-secret --cert=../certificates/server.crt --key=../certificates/server.key + check: + ($error == null): true + - name: Create pods and services + try: + - apply: + file: create-pods-services.yaml + catch: + - describe: + apiVersion: v1 + kind: Pod + - describe: + apiVersion: v1 + kind: Service + - name: Check that loadbalancer ip is assigned + try: + - assert: + resource: + apiVersion: v1 + kind: Service + metadata: + name: svc-test + status: + (loadBalancer.ingress[0].ip != null): true + - name: Fetch loadbalancer ip and check if pod is reachable + try: + - script: + content: | + set -e + IP=$(kubectl get svc svc-test -n $NAMESPACE -o json | jq -r .status.loadBalancer.ingress[0].ip) + + podnames=() + + for i in {1..10}; do + if [[ ${#podnames[@]} -lt 1 ]]; then + output=$(curl --resolve linode.test:80:$IP --cacert ../certificates/ca.crt -s https://linode.test:80 | jq -e .podName || true) + + if [[ "$output" == *"test-"* ]]; then + unique=true + for existing in "${podnames[@]}"; do + if [[ "$existing" == "$output" ]]; then + unique=false + break + fi + done + if [[ "$unique" == true ]]; then + podnames+=($output) + fi + fi + else + break + fi + sleep 10 + done + + if [[ ${#podnames[@]} -lt 1 ]]; then + echo "all pods failed to respond" + else + echo "all pods responded" + fi + check: + ($error == null): true + (contains($stdout, 'all pods responded')): true + - name: Delete Pods + try: + - delete: + ref: + apiVersion: v1 + kind: Pod + - name: Delete Service + try: + - delete: + ref: + apiVersion: v1 + kind: Service diff --git a/e2e/test/lb-single-tls/create-pods-services.yaml b/e2e/test/lb-single-tls/create-pods-services.yaml new file mode 100644 index 00000000..d749a6b6 --- /dev/null +++ b/e2e/test/lb-single-tls/create-pods-services.yaml @@ -0,0 +1,50 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: lb-single-tls + name: test +spec: + replicas: 1 + selector: + matchLabels: + app: lb-single-tls + template: + metadata: + labels: + app: lb-single-tls + spec: + containers: + -
image: appscode/test-server:2.3 + name: test + ports: + - name: https + containerPort: 8080 + protocol: TCP + env: + - name: POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name +--- +apiVersion: v1 +kind: Service +metadata: + name: svc-test + annotations: + service.beta.kubernetes.io/linode-loadbalancer-default-protocol: https + service.beta.kubernetes.io/linode-loadbalancer-port-80: '{ "tls-secret-name": "tls-secret" }' + labels: + app: lb-single-tls +spec: + type: LoadBalancer + selector: + app: lb-single-tls + ports: + - name: https + protocol: TCP + port: 80 + targetPort: 8080 + sessionAffinity: None diff --git a/e2e/test/lb-tcp-connection-health-check/chainsaw-test.yaml b/e2e/test/lb-tcp-connection-health-check/chainsaw-test.yaml new file mode 100644 index 00000000..f59f14e2 --- /dev/null +++ b/e2e/test/lb-tcp-connection-health-check/chainsaw-test.yaml @@ -0,0 +1,67 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + name: lb-tcp-connection-health-check +spec: + namespace: "lb-tcp-connection-health-check" + steps: + - name: Check if CCM is deployed + try: + - assert: + file: ../assert-ccm-resources.yaml + - name: Create pods and services + try: + - apply: + file: create-pods-services.yaml + catch: + - describe: + apiVersion: v1 + kind: Pod + - describe: + apiVersion: v1 + kind: Service + - name: Check that loadbalancer ip is assigned + try: + - assert: + resource: + apiVersion: v1 + kind: Service + metadata: + name: svc-test + status: + (loadBalancer.ingress[0].ip != null): true + - name: Fetch nodebalancer config for port 80 + try: + - script: + content: | + set -e + + nbid=$(KUBECONFIG=$KUBECONFIG NAMESPACE=$NAMESPACE LINODE_TOKEN=$LINODE_TOKEN ../scripts/get-nb-id.sh) + + for i in {1..10}; do + nbconfig=$(curl -s \ + -H "Authorization: Bearer $LINODE_TOKEN" \ + -H "Content-Type: application/json" \ + "https://api.linode.com/v4/nodebalancers/$nbid/configs" | jq '.data[] | select(.port == 80)' || true) + + if [[ -z $nbconfig ]]; then + echo "Failed fetching nodebalancer config for port 80" + fi + + port_80_check=$(echo $nbconfig | jq '.check == "connection"') + port_80_interval=$(echo $nbconfig | jq '.check_interval == 10') + port_80_timeout=$(echo $nbconfig | jq '.check_timeout == 5') + port_80_attempts=$(echo $nbconfig | jq '.check_attempts == 4') + port_80_protocol=$(echo $nbconfig | jq '.protocol == "tcp"') + port_80_up_nodes=$(echo $nbconfig | jq '(.nodes_status.up)|tonumber >= 2') + + if [[ $port_80_check == "true" && $port_80_interval == "true" && $port_80_timeout == "true" && $port_80_attempts == "true" && $port_80_protocol == "true" && $port_80_up_nodes == "true" ]]; then + echo "All conditions met" + break + fi + sleep 15 + done + check: + ($error == null): true + (contains($stdout, 'All conditions met')): true diff --git a/e2e/test/lb-tcp-connection-health-check/create-pods-services.yaml b/e2e/test/lb-tcp-connection-health-check/create-pods-services.yaml new file mode 100644 index 00000000..0eae0673 --- /dev/null +++ b/e2e/test/lb-tcp-connection-health-check/create-pods-services.yaml @@ -0,0 +1,53 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: tcp-connection-health-check + name: test +spec: + replicas: 1 + selector: + matchLabels: + app: tcp-connection-health-check + template: + metadata: + labels: + app: tcp-connection-health-check + spec: + containers: + - image: nginx + 
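# plain nginx suffices here: the "connection" check type only requires the NodeBalancer to complete a TCP handshake on the port + 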
name: test + ports: + - name: http + containerPort: 80 + protocol: TCP + env: + - name: POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name +--- +apiVersion: v1 +kind: Service +metadata: + annotations: + service.beta.kubernetes.io/linode-loadbalancer-check-attempts: "4" + service.beta.kubernetes.io/linode-loadbalancer-check-interval: "10" + service.beta.kubernetes.io/linode-loadbalancer-check-timeout: "5" + service.beta.kubernetes.io/linode-loadbalancer-check-type: connection + service.beta.kubernetes.io/linode-loadbalancer-default-protocol: tcp + name: svc-test + labels: + app: tcp-connection-health-check +spec: + type: LoadBalancer + selector: + app: tcp-connection-health-check + ports: + - name: http + protocol: TCP + port: 80 + targetPort: 80 + sessionAffinity: None diff --git a/e2e/test/lb-updated-with-nb-id/chainsaw-test.yaml b/e2e/test/lb-updated-with-nb-id/chainsaw-test.yaml new file mode 100644 index 00000000..c897979b --- /dev/null +++ b/e2e/test/lb-updated-with-nb-id/chainsaw-test.yaml @@ -0,0 +1,69 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + name: lb-updated-with-nb-id +spec: + namespace: "lb-updated-with-nb-id" + steps: + - name: Check if CCM is deployed + try: + - assert: + file: ../assert-ccm-resources.yaml + - name: Create pods and services + try: + - apply: + file: create-pods-services.yaml + catch: + - describe: + apiVersion: v1 + kind: Pod + - describe: + apiVersion: v1 + kind: Service + - name: Check that loadbalancer ip is assigned + try: + - assert: + resource: + apiVersion: v1 + kind: Service + metadata: + name: svc-test + status: + (loadBalancer.ingress[0].ip != null): true + - name: Create nodebalancer, annotate svc with nodebalancer id and validate + try: + - script: + content: | + set -e + + re='^[0-9]+$' + LABEL="ccm-$(head /dev/urandom | tr -dc 'a-z0-9' | head -c 5)" + + nbid=$(curl -s --request POST \ + -H "Authorization: Bearer $LINODE_TOKEN" \ + -H "Content-Type: application/json" \ + -H "accept: application/json" \ + "https://api.linode.com/v4/nodebalancers" \ + --data "{\"label\": \"$LABEL\", \"region\": \"$REGION\"}" | jq .id) + + if ! 
[[ $nbid =~ $re ]]; then + echo "Nodebalancer id [$nbid] for label [$LABEL] is incorrect, failed to create nodebalancer" + exit 1 + fi + + kubectl annotate svc svc-test -n $NAMESPACE service.beta.kubernetes.io/linode-loadbalancer-nodebalancer-id=$nbid + sleep 5 + + for i in {1..10}; do + nbid2=$(KUBECONFIG=$KUBECONFIG NAMESPACE=$NAMESPACE LINODE_TOKEN=$LINODE_TOKEN ../scripts/get-nb-id.sh) + + if [[ $nbid == $nbid2 ]]; then + echo "Condition met" + break + fi + sleep 10 + done + check: + ($error == null): true + (contains($stdout, 'Condition met')): true diff --git a/e2e/test/lb-updated-with-nb-id/create-pods-services.yaml b/e2e/test/lb-updated-with-nb-id/create-pods-services.yaml new file mode 100644 index 00000000..41b75aab --- /dev/null +++ b/e2e/test/lb-updated-with-nb-id/create-pods-services.yaml @@ -0,0 +1,47 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: updated-with-nb-id + name: test +spec: + replicas: 1 + selector: + matchLabels: + app: updated-with-nb-id + template: + metadata: + labels: + app: updated-with-nb-id + spec: + containers: + - image: appscode/test-server:2.3 + name: test + ports: + - name: http-1 + containerPort: 8080 + protocol: TCP + env: + - name: POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name +--- +apiVersion: v1 +kind: Service +metadata: + name: svc-test + labels: + app: updated-with-nb-id +spec: + type: LoadBalancer + selector: + app: updated-with-nb-id + ports: + - name: http-1 + protocol: TCP + port: 80 + targetPort: 8080 + sessionAffinity: None diff --git a/e2e/test/lb-with-http-to-https/chainsaw-test.yaml b/e2e/test/lb-with-http-to-https/chainsaw-test.yaml new file mode 100644 index 00000000..745b77ad --- /dev/null +++ b/e2e/test/lb-with-http-to-https/chainsaw-test.yaml @@ -0,0 +1,90 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + name: lb-with-http-to-https +spec: + namespace: "lb-with-http-to-https" + steps: + - name: Check if CCM is deployed + try: + - assert: + file: ../assert-ccm-resources.yaml + - name: Create pods and services + try: + - apply: + file: create-pods-services.yaml + catch: + - describe: + apiVersion: v1 + kind: Pod + - describe: + apiVersion: v1 + kind: Service + - name: Create secrets + try: + - script: + content: | + set -e + kubectl -n $NAMESPACE create secret tls tls-secret-1 --cert=../certificates/server.crt --key=../certificates/server.key + check: + ($error == null): true + - name: Update service to have another annotation and port + try: + - script: + content: | + set -e + kubectl annotate svc svc-test -n $NAMESPACE service.beta.kubernetes.io/linode-loadbalancer-port-443='{"tls-secret-name": "tls-secret-1", "protocol": "https"}' + kubectl patch svc svc-test -n $NAMESPACE --type='json' -p='[{"op": "add", "path": "/spec/ports/-", "value": {"name": "https", "port": 443, "targetPort": 8080, "protocol": "TCP"}}]' + sleep 10 + check: + ($error == null): true + - name: Check endpoints + try: + - assert: + resource: + apiVersion: v1 + kind: Endpoints + metadata: + name: svc-test + (subsets[0].addresses != null): true + (subsets[0].ports != null): true + (length(subsets[0].ports)): 2 + catch: + - describe: + apiVersion: v1 + kind: Pod + - describe: + apiVersion: v1 + kind: Service + - name: Check that loadbalancer ip is assigned + try: + - assert: + resource: + apiVersion: v1 + kind: Service + metadata: + name: svc-test + status: 
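+ # chainsaw treats parenthesized keys like the one below as JMESPath expressions evaluated against the live resource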
+ (loadBalancer.ingress[0].ip != null): true + - name: Fetch loadbalancer ip and check if pod reachable on different ports with different protocols + try: + - script: + content: | + set -e + IP=$(kubectl get svc svc-test -n $NAMESPACE -o json | jq -r .status.loadBalancer.ingress[0].ip) + + for i in {1..10}; do + port_80=$(curl -s $IP:80 | grep "test-" || true) + port_443=$(curl --resolve linode.test:443:$IP --cacert ../certificates/ca.crt -s https://linode.test:443 | grep "test-" || true) + + if [[ -z $port_80 || -z $port_443 ]]; then + sleep 20 + else + echo "all pods responded" + break + fi + done + check: + ($error == null): true + (contains($stdout, 'all pods responded')): true diff --git a/e2e/test/lb-with-http-to-https/create-pods-services.yaml b/e2e/test/lb-with-http-to-https/create-pods-services.yaml new file mode 100644 index 00000000..775db623 --- /dev/null +++ b/e2e/test/lb-with-http-to-https/create-pods-services.yaml @@ -0,0 +1,50 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: http-to-https + name: test +spec: + replicas: 1 + selector: + matchLabels: + app: http-to-https + template: + metadata: + labels: + app: http-to-https + spec: + containers: + - image: appscode/test-server:2.3 + name: test + ports: + - name: alpha + containerPort: 8080 + protocol: TCP + env: + - name: POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name +--- +apiVersion: v1 +kind: Service +metadata: + annotations: + service.beta.kubernetes.io/linode-loadbalancer-default-protocol: https + service.beta.kubernetes.io/linode-loadbalancer-port-80: '{"protocol": "http"}' + name: svc-test + labels: + app: http-to-https +spec: + type: LoadBalancer + selector: + app: http-to-https + ports: + - name: http + protocol: TCP + port: 80 + targetPort: 8080 + sessionAffinity: None diff --git a/e2e/test/lb-with-multiple-http-https-ports/chainsaw-test.yaml b/e2e/test/lb-with-multiple-http-https-ports/chainsaw-test.yaml new file mode 100644 index 00000000..da73d113 --- /dev/null +++ b/e2e/test/lb-with-multiple-http-https-ports/chainsaw-test.yaml @@ -0,0 +1,84 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + name: lb-with-multiple-http-https-ports +spec: + namespace: "lb-with-multiple-http-https-ports" + steps: + - name: Check if CCM is deployed + try: + - assert: + file: ../assert-ccm-resources.yaml + - name: Create pods and services + try: + - apply: + file: create-pods-services.yaml + catch: + - describe: + apiVersion: v1 + kind: Pod + - describe: + apiVersion: v1 + kind: Service + - name: Create secrets + try: + - script: + content: | + set -e + kubectl -n $NAMESPACE create secret tls tls-secret-1 --cert=../certificates/server.crt --key=../certificates/server.key + kubectl -n $NAMESPACE create secret tls tls-secret-2 --cert=../certificates/server.crt --key=../certificates/server.key + sleep 2 + check: + ($error == null): true + - name: Check endpoints exist + try: + - assert: + resource: + apiVersion: v1 + kind: Endpoints + metadata: + name: svc-test + (subsets[0].addresses != null): true + (subsets[0].ports != null): true + (length(subsets[0].ports)): 4 + catch: + - describe: + apiVersion: v1 + kind: Pod + - describe: + apiVersion: v1 + kind: Service + - name: Check that loadbalancer ip is assigned + try: + - assert: + resource: + apiVersion: v1 + kind: Service + metadata: + name: svc-test + status: + 
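# wait for the CCM to publish the NodeBalancer IP before curling the individual ports + 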
(loadBalancer.ingress[0].ip != null): true + - name: Fetch loadbalancer ip and check if pod reachable on different ports with different protocols + try: + - script: + content: | + set -e + IP=$(kubectl get svc svc-test -n $NAMESPACE -o json | jq -r .status.loadBalancer.ingress[0].ip) + + for i in {1..10}; do + port_80=$(curl -s $IP:80 | grep "test-" || true) + port_8080=$(curl -s $IP:8080 | grep "test-" || true) + port_443=$(curl --resolve linode.test:443:$IP --cacert ../certificates/ca.crt -s https://linode.test:443 | grep "test-" || true) + port_8443=$(curl --resolve linode.test:8443:$IP --cacert ../certificates/ca.crt -s https://linode.test:8443 | grep "test-" || true) + + if [[ -z $port_80 || -z $port_8080 || -z $port_443 || -z $port_8443 ]]; then + sleep 15 + else + echo "all pods responded" + break + fi + done + check: + ($error == null): true + (contains($stdout, 'all pods responded')): true diff --git a/e2e/test/lb-with-multiple-http-https-ports/create-pods-services.yaml b/e2e/test/lb-with-multiple-http-https-ports/create-pods-services.yaml new file mode 100644 index 00000000..c29dc014 --- /dev/null +++ b/e2e/test/lb-with-multiple-http-https-ports/create-pods-services.yaml @@ -0,0 +1,68 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: multiple-http-https-ports + name: test +spec: + replicas: 1 + selector: + matchLabels: + app: multiple-http-https-ports + template: + metadata: + labels: + app: multiple-http-https-ports + spec: + containers: + - image: appscode/test-server:2.3 + name: test + ports: + - name: alpha + containerPort: 8080 + protocol: TCP + - name: beta + containerPort: 8989 + protocol: TCP + env: + - name: POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name +--- +apiVersion: v1 +kind: Service +metadata: + annotations: + service.beta.kubernetes.io/linode-loadbalancer-default-protocol: https + service.beta.kubernetes.io/linode-loadbalancer-port-80: '{"protocol": "http"}' + service.beta.kubernetes.io/linode-loadbalancer-port-443: '{"tls-secret-name": "tls-secret-1"}' + service.beta.kubernetes.io/linode-loadbalancer-port-8080: '{"protocol": "http"}' + service.beta.kubernetes.io/linode-loadbalancer-port-8443: '{"tls-secret-name": "tls-secret-2", "protocol": "https"}' + name: svc-test + labels: + app: multiple-http-https-ports +spec: + type: LoadBalancer + selector: + app: multiple-http-https-ports + ports: + - name: http-1 + protocol: TCP + port: 80 + targetPort: 8989 + - name: http-2 + protocol: TCP + port: 8080 + targetPort: 8080 + - name: https-1 + protocol: TCP + port: 443 + targetPort: 8080 + - name: https-2 + protocol: TCP + port: 8443 + targetPort: 8989 + sessionAffinity: None diff --git a/e2e/test/lb-with-node-addition/chainsaw-test.yaml b/e2e/test/lb-with-node-addition/chainsaw-test.yaml new file mode 100644 index 00000000..62f17873 --- /dev/null +++ b/e2e/test/lb-with-node-addition/chainsaw-test.yaml @@ -0,0 +1,99 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + name: lb-with-node-addition +spec: + namespace: "lb-with-node-addition" + steps: + - name: Check if CCM is deployed + try: + - assert: + file: ../assert-ccm-resources.yaml + - name: Create resources + try: + - apply: + file: create-pods-services.yaml + catch: + - describe: + apiVersion: v1 + kind: Pod + - describe: + apiVersion: v1 + kind: Service + - name: Check that loadbalancer ip is assigned + try: + - 
assert: + resource: + apiVersion: v1 + kind: Service + metadata: + name: svc-test + status: + (loadBalancer.ingress[0].ip != null): true + - name: Validate nodebalancer has 2 nodes + try: + - script: + content: | + set -e + + nbid=$(KUBECONFIG=$KUBECONFIG NAMESPACE=$NAMESPACE LINODE_TOKEN=$LINODE_TOKEN ../scripts/get-nb-id.sh) + + for i in {1..10}; do + nbconfig=$(curl -s \ + -H "Authorization: Bearer $LINODE_TOKEN" \ + -H "Content-Type: application/json" \ + "https://api.linode.com/v4/nodebalancers/$nbid/configs" | jq '.data[]? | select(.port == 80)') + + if [[ -z $nbconfig ]]; then + echo "Failed fetching nodebalancer config for port 80" + fi + + port_80_up_nodes=$(echo $nbconfig | jq '(.nodes_status.up)|tonumber == 2') + + if [[ $port_80_up_nodes == "true" ]]; then + echo "all nodes up" + break + fi + sleep 15 + done + check: + ($error == null): true + (contains($stdout, 'all nodes up')): true + - name: Add new node and check nodebalancer gets updated + try: + - script: + content: | + set -e + + current_replicas=$(KUBECONFIG=$MGMT_KUBECONFIG kubectl get machinedeployment ${CLUSTER_NAME}-md-0 -o=jsonpath='{.spec.replicas}') + required_replicas=$((current_replicas + 1)) + KUBECONFIG=$MGMT_KUBECONFIG kubectl patch machinedeployment ${CLUSTER_NAME}-md-0 --type='merge' -p "{\"spec\":{\"replicas\":$required_replicas}}" + + sleep 180 + + nbid=$(KUBECONFIG=$KUBECONFIG NAMESPACE=$NAMESPACE LINODE_TOKEN=$LINODE_TOKEN ../scripts/get-nb-id.sh) + + for i in {1..10}; do + nbconfig=$(curl -s \ + -H "Authorization: Bearer $LINODE_TOKEN" \ + -H "Content-Type: application/json" \ + "https://api.linode.com/v4/nodebalancers/$nbid/configs" | jq '.data[]? | select(.port == 80)' || true) + + if [[ -z $nbconfig ]]; then + echo "Failed fetching nodebalancer config for port 80" + fi + + port_80_up_nodes=$(echo $nbconfig | jq '(.nodes_status.up)|tonumber == 3') + + if [[ $port_80_up_nodes == "true" ]]; then + echo "all nodes up" + break + fi + sleep 20 + done + + #KUBECONFIG=$MGMT_KUBECONFIG kubectl patch machinedeployment ${CLUSTER_NAME}-md-0 --type='merge' -p "{\"spec\":{\"replicas\":$current_replicas}}" + check: + ($error == null): true + (contains($stdout, 'all nodes up')): true diff --git a/e2e/test/lb-with-node-addition/create-pods-services.yaml b/e2e/test/lb-with-node-addition/create-pods-services.yaml new file mode 100644 index 00000000..39a55b9d --- /dev/null +++ b/e2e/test/lb-with-node-addition/create-pods-services.yaml @@ -0,0 +1,47 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: with-node-addition + name: test +spec: + replicas: 1 + selector: + matchLabels: + app: with-node-addition + template: + metadata: + labels: + app: with-node-addition + spec: + containers: + - image: appscode/test-server:2.3 + name: test + ports: + - name: http-1 + containerPort: 8080 + protocol: TCP + env: + - name: POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name +--- +apiVersion: v1 +kind: Service +metadata: + name: svc-test + labels: + app: with-node-addition +spec: + type: LoadBalancer + selector: + app: with-node-addition + ports: + - name: http-1 + protocol: TCP + port: 80 + targetPort: 8080 + sessionAffinity: None diff --git a/e2e/test/lb-with-proxyprotocol-default-annotation/chainsaw-test.yaml b/e2e/test/lb-with-proxyprotocol-default-annotation/chainsaw-test.yaml new file mode 100644 index 00000000..e8e07665 --- /dev/null +++ b/e2e/test/lb-with-proxyprotocol-default-annotation/chainsaw-test.yaml @@ -0,0 +1,112 @@ +# yaml-language-server: 
$schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + name: lb-with-proxyprotocol-default-annotation +spec: + namespace: "lb-with-proxyprotocol-default-annotation" + steps: + - name: Check if CCM is deployed + try: + - assert: + file: ../assert-ccm-resources.yaml + - name: Create pods and services + try: + - apply: + file: create-pods-services.yaml + catch: + - describe: + apiVersion: v1 + kind: Pod + - describe: + apiVersion: v1 + kind: Service + - name: Check endpoints exist + try: + - assert: + resource: + apiVersion: v1 + kind: Endpoints + metadata: + name: svc-test + (subsets[0].addresses != null): true + (subsets[0].ports != null): true + - name: Add ProxyProtocol v2 using deprecated annotation + try: + - script: + content: | + set -e + kubectl annotate svc svc-test -n $NAMESPACE service.beta.kubernetes.io/linode-loadbalancer-proxy-protocol=v2 + sleep 10 + check: + ($error == null): true + - name: Check NodeBalancerConfig for port 80 and 8080 have ProxyProtocol v2 + try: + - script: + content: | + set -e + + nbid=$(KUBECONFIG=$KUBECONFIG NAMESPACE=$NAMESPACE LINODE_TOKEN=$LINODE_TOKEN ../scripts/get-nb-id.sh) + + nbconfig=$(curl -s \ + -H "Authorization: Bearer $LINODE_TOKEN" \ + -H "Content-Type: application/json" \ + "https://api.linode.com/v4/nodebalancers/$nbid/configs") + + port_80_v2=$(echo $nbconfig | jq -r '.data[] | select(.port == 80) | .proxy_protocol == "v2"') + port_8080_v2=$(echo $nbconfig | jq -r '.data[] | select(.port == 8080) | .proxy_protocol == "v2"') + + if [[ $port_80_v2 == "true" && $port_8080_v2 == "true" ]]; then + echo "Conditions met" + else + echo "Conditions not met" + fi + check: + ($error): ~ + (contains($stdout, 'Conditions met')): true + - name: Add default annotation for ProxyProtocol v1 + try: + - script: + content: | + set -e + kubectl annotate svc svc-test -n $NAMESPACE service.beta.kubernetes.io/linode-loadbalancer-default-proxy-protocol=v1 + sleep 10 + check: + ($error == null): true + - name: Check NodeBalancerConfig for port 80 and 8080 have ProxyProtocol v1 + try: + - script: + content: | + set -e + + re='^[0-9]+$' + + hostname=$(kubectl get svc svc-test -n $NAMESPACE -o json | jq -r .status.loadBalancer.ingress[0].hostname) + ip=$(echo $hostname | awk -F'.' '{gsub("-", ".", $1); print $1}') + nbid=$(curl -s \ + -H "Authorization: Bearer $LINODE_TOKEN" \ + -H "Content-Type: application/json" \ + -H "X-Filter: {\"ipv4\": \"$ip\"}" \ + "https://api.linode.com/v4/nodebalancers" | jq .data[].id) + + if ! 
[[ $nbid =~ $re ]]; then + echo "Nodebalancer id [$nbid] is incorrect, doesn't meet regex requirements" + exit 1 + fi + + nbconfig=$(curl -s \ + -H "Authorization: Bearer $LINODE_TOKEN" \ + -H "Content-Type: application/json" \ + "https://api.linode.com/v4/nodebalancers/$nbid/configs") + + port_80_v1=$(echo $nbconfig | jq -r '.data[] | select(.port == 80) | .proxy_protocol == "v1"') + port_8080_v1=$(echo $nbconfig | jq -r '.data[] | select(.port == 8080) | .proxy_protocol == "v1"') + + if [[ $port_80_v1 == "true" && $port_8080_v1 == "true" ]]; then + echo "Conditions met" + else + echo "Conditions not met" + fi + check: + ($error): ~ + (contains($stdout, 'Conditions met')): true diff --git a/e2e/test/lb-with-proxyprotocol-default-annotation/create-pods-services.yaml b/e2e/test/lb-with-proxyprotocol-default-annotation/create-pods-services.yaml new file mode 100644 index 00000000..4ac2edc2 --- /dev/null +++ b/e2e/test/lb-with-proxyprotocol-default-annotation/create-pods-services.yaml @@ -0,0 +1,54 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: proxyprotocol-default-annotation + name: test +spec: + replicas: 1 + selector: + matchLabels: + app: proxyprotocol-default-annotation + template: + metadata: + labels: + app: proxyprotocol-default-annotation + spec: + containers: + - image: appscode/test-server:2.3 + name: test + ports: + - name: http-1 + containerPort: 80 + protocol: TCP + - name: http-2 + containerPort: 8080 + protocol: TCP + env: + - name: POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name +--- +apiVersion: v1 +kind: Service +metadata: + name: svc-test + labels: + app: proxyprotocol-default-annotation +spec: + type: LoadBalancer + selector: + app: proxyprotocol-default-annotation + ports: + - name: http-1 + protocol: TCP + port: 80 + targetPort: 80 + - name: http-2 + protocol: TCP + port: 8080 + targetPort: 8080 + sessionAffinity: None diff --git a/e2e/test/lb-with-proxyprotocol-override/chainsaw-test.yaml b/e2e/test/lb-with-proxyprotocol-override/chainsaw-test.yaml new file mode 100644 index 00000000..384fdc4a --- /dev/null +++ b/e2e/test/lb-with-proxyprotocol-override/chainsaw-test.yaml @@ -0,0 +1,100 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + name: lb-with-proxyprotocol-override +spec: + namespace: "lb-with-proxyprotocol-override" + steps: + - name: Check if CCM is deployed + try: + - assert: + file: ../assert-ccm-resources.yaml + - name: Create pods and services + try: + - apply: + file: create-pods-services.yaml + catch: + - describe: + apiVersion: v1 + kind: Pod + - describe: + apiVersion: v1 + kind: Service + - name: Check endpoints exist + try: + - assert: + resource: + apiVersion: v1 + kind: Endpoints + metadata: + name: svc-test + (subsets[0].addresses != null): true + (subsets[0].ports != null): true + - name: Annotate service port 80 with v1 and 8080 with v2 + try: + - script: + content: | + set -e + kubectl annotate svc svc-test -n $NAMESPACE service.beta.kubernetes.io/linode-loadbalancer-port-80='{"proxy-protocol": "v1"}' + kubectl annotate svc svc-test -n $NAMESPACE service.beta.kubernetes.io/linode-loadbalancer-port-8080='{"proxy-protocol": "v2"}' + sleep 10 + - name: Check NodeBalancerConfig for port 80 to have ProxyProtocol v1 and port 8080 to have ProxyProtocol v2 + try: + - script: + content: | + set -e + + nbid=$(KUBECONFIG=$KUBECONFIG 
NAMESPACE=$NAMESPACE LINODE_TOKEN=$LINODE_TOKEN ../scripts/get-nb-id.sh) + + nbconfig=$(curl -s \ + -H "Authorization: Bearer $LINODE_TOKEN" \ + -H "Content-Type: application/json" \ + "https://api.linode.com/v4/nodebalancers/$nbid/configs") + + port_80_v1=$(echo $nbconfig | jq -r '.data[] | select(.port == 80) | .proxy_protocol == "v1"') + port_8080_v2=$(echo $nbconfig | jq -r '.data[] | select(.port == 8080) | .proxy_protocol == "v2"') + + if [[ $port_80_v1 == "true" && $port_8080_v2 == "true" ]]; then + echo "Conditions met" + else + echo "Conditions not met" + fi + check: + ($error): ~ + (contains($stdout, 'Conditions met')): true + - name: Update service annotation for port 80 to v2 and 8080 with v1 + try: + - script: + content: | + set -e + kubectl annotate svc svc-test -n $NAMESPACE service.beta.kubernetes.io/linode-loadbalancer-default-proxy-protocol=v2 + kubectl annotate svc svc-test -n $NAMESPACE service.beta.kubernetes.io/linode-loadbalancer-port-80- + kubectl annotate --overwrite svc svc-test -n $NAMESPACE service.beta.kubernetes.io/linode-loadbalancer-port-8080='{"proxy-protocol": "v1"}' + sleep 10 + check: + ($error == null): true + - name: Check NodeBalancerConfig for port 80 to have ProxyProtocol v2 and port 8080 to have ProxyProtocol v1 + try: + - script: + content: | + set -e + + nbid=$(KUBECONFIG=$KUBECONFIG NAMESPACE=$NAMESPACE LINODE_TOKEN=$LINODE_TOKEN ../scripts/get-nb-id.sh) + + nbconfig=$(curl -s \ + -H "Authorization: Bearer $LINODE_TOKEN" \ + -H "Content-Type: application/json" \ + "https://api.linode.com/v4/nodebalancers/$nbid/configs") + + port_80_v2=$(echo $nbconfig | jq -r '.data[] | select(.port == 80) | .proxy_protocol == "v2"') + port_8080_v1=$(echo $nbconfig | jq -r '.data[] | select(.port == 8080) | .proxy_protocol == "v1"') + + if [[ $port_80_v2 == "true" && $port_8080_v1 == "true" ]]; then + echo "Conditions met" + else + echo "Conditions not met" + fi + check: + ($error): ~ + (contains($stdout, 'Conditions met')): true diff --git a/e2e/test/lb-with-proxyprotocol-override/create-pods-services.yaml b/e2e/test/lb-with-proxyprotocol-override/create-pods-services.yaml new file mode 100644 index 00000000..a6247c4d --- /dev/null +++ b/e2e/test/lb-with-proxyprotocol-override/create-pods-services.yaml @@ -0,0 +1,54 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: proxyprotocol-override + name: test +spec: + replicas: 1 + selector: + matchLabels: + app: proxyprotocol-override + template: + metadata: + labels: + app: proxyprotocol-override + spec: + containers: + - image: appscode/test-server:2.3 + name: test + ports: + - name: http-1 + containerPort: 80 + protocol: TCP + - name: http-2 + containerPort: 8080 + protocol: TCP + env: + - name: POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name +--- +apiVersion: v1 +kind: Service +metadata: + name: svc-test + labels: + app: proxyprotocol-override +spec: + type: LoadBalancer + selector: + app: proxyprotocol-override + ports: + - name: http-1 + protocol: TCP + port: 80 + targetPort: 80 + - name: http-2 + protocol: TCP + port: 8080 + targetPort: 8080 + sessionAffinity: None diff --git a/e2e/test/lb-with-proxyprotocol-port-specific/chainsaw-test.yaml b/e2e/test/lb-with-proxyprotocol-port-specific/chainsaw-test.yaml new file mode 100644 index 00000000..61cc3d25 --- /dev/null +++ b/e2e/test/lb-with-proxyprotocol-port-specific/chainsaw-test.yaml @@ -0,0 +1,66 @@ +# yaml-language-server: 
$schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + name: lb-with-proxyprotocol-port-specific +spec: + namespace: "lb-with-proxyprotocol-port-specific" + steps: + - name: Check if CCM is deployed + try: + - assert: + file: ../assert-ccm-resources.yaml + - name: Create pods and services + try: + - apply: + file: create-pods-services.yaml + catch: + - describe: + apiVersion: v1 + kind: Pod + - describe: + apiVersion: v1 + kind: Service + - name: Check endpoints exist + try: + - assert: + resource: + apiVersion: v1 + kind: Endpoints + metadata: + name: svc-test + (subsets[0].addresses != null): true + (subsets[0].ports != null): true + - name: Annotate service port 8080 with v2 + try: + - script: + content: | + set -e + kubectl annotate svc svc-test -n $NAMESPACE service.beta.kubernetes.io/linode-loadbalancer-port-8080='{"proxy-protocol": "v2"}' + sleep 10 + check: + ($error == null): true + - name: Check NodeBalancerConfig for port 80 to not have ProxyProtocol and port 8080 to have ProxyProtocol v2 + try: + - script: + content: | + set -e + + nbid=$(KUBECONFIG=$KUBECONFIG NAMESPACE=$NAMESPACE LINODE_TOKEN=$LINODE_TOKEN ../scripts/get-nb-id.sh) + + nbconfig=$(curl -s \ + -H "Authorization: Bearer $LINODE_TOKEN" \ + -H "Content-Type: application/json" \ + "https://api.linode.com/v4/nodebalancers/$nbid/configs") + + port_80_none=$(echo $nbconfig | jq -r '.data[] | select(.port == 80) | .proxy_protocol == "none"') + port_8080_v2=$(echo $nbconfig | jq -r '.data[] | select(.port == 8080) | .proxy_protocol == "v2"') + + if [[ $port_80_none == "true" && $port_8080_v2 == "true" ]]; then + echo "Conditions met" + else + echo "Conditions not met" + fi + check: + ($error): ~ + (contains($stdout, 'Conditions met')): true diff --git a/e2e/test/lb-with-proxyprotocol-port-specific/create-pods-services.yaml b/e2e/test/lb-with-proxyprotocol-port-specific/create-pods-services.yaml new file mode 100644 index 00000000..95c0a822 --- /dev/null +++ b/e2e/test/lb-with-proxyprotocol-port-specific/create-pods-services.yaml @@ -0,0 +1,54 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: proxyprotocol-port-specific + name: test +spec: + replicas: 1 + selector: + matchLabels: + app: proxyprotocol-port-specific + template: + metadata: + labels: + app: proxyprotocol-port-specific + spec: + containers: + - image: appscode/test-server:2.3 + name: test + ports: + - name: http-1 + containerPort: 80 + protocol: TCP + - name: http-2 + containerPort: 8080 + protocol: TCP + env: + - name: POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name +--- +apiVersion: v1 +kind: Service +metadata: + name: svc-test + labels: + app: proxyprotocol-port-specific +spec: + type: LoadBalancer + selector: + app: proxyprotocol-port-specific + ports: + - name: http-1 + protocol: TCP + port: 80 + targetPort: 80 + - name: http-2 + protocol: TCP + port: 8080 + targetPort: 8080 + sessionAffinity: None diff --git a/e2e/test/lb-with-proxyprotocol-set/chainsaw-test.yaml b/e2e/test/lb-with-proxyprotocol-set/chainsaw-test.yaml new file mode 100644 index 00000000..c4a43b2d --- /dev/null +++ b/e2e/test/lb-with-proxyprotocol-set/chainsaw-test.yaml @@ -0,0 +1,77 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + name: 
lb-with-proxyprotocol-set +spec: + namespace: "lb-with-proxyprotocol-set" + steps: + - name: Check if CCM is deployed + try: + - assert: + file: ../assert-ccm-resources.yaml + - name: Create pods and services + try: + - apply: + file: create-pods-services.yaml + catch: + - describe: + apiVersion: v1 + kind: Pod + - describe: + apiVersion: v1 + kind: Service + - name: Check endpoints exist + try: + - assert: + resource: + apiVersion: v1 + kind: Endpoints + metadata: + name: svc-test + (subsets[0].addresses != null): true + (subsets[0].ports != null): true + - name: Check that loadbalancer ip is assigned + try: + - assert: + resource: + apiVersion: v1 + kind: Service + metadata: + name: svc-test + status: + (loadBalancer.ingress[0].ip != null): true + - name: Annotate service port 80 with v1 and 8080 with v2 + try: + - script: + content: | + set -e + kubectl annotate svc svc-test -n $NAMESPACE service.beta.kubernetes.io/linode-loadbalancer-port-80='{"proxy-protocol": "v1"}' + kubectl annotate svc svc-test -n $NAMESPACE service.beta.kubernetes.io/linode-loadbalancer-port-8080='{"proxy-protocol": "v2"}' + sleep 10 + check: + ($error == null): true + - name: Check NodeBalancerConfig for port 80 to have ProxyProtocol v1 and port 8080 to have ProxyProtocol v2 + try: + - script: + content: | + set -e + + nbid=$(KUBECONFIG=$KUBECONFIG NAMESPACE=$NAMESPACE LINODE_TOKEN=$LINODE_TOKEN ../scripts/get-nb-id.sh) + + nbconfig=$(curl -s \ + -H "Authorization: Bearer $LINODE_TOKEN" \ + -H "Content-Type: application/json" \ + "https://api.linode.com/v4/nodebalancers/$nbid/configs") + + port_80_v1=$(echo $nbconfig | jq -r '.data[] | select(.port == 80) | .proxy_protocol == "v1"') + port_8080_v2=$(echo $nbconfig | jq -r '.data[] | select(.port == 8080) | .proxy_protocol == "v2"') + + if [[ $port_80_v1 == "true" && $port_8080_v2 == "true" ]]; then + echo "Conditions met" + else + echo "Conditions not met" + fi + check: + ($error): ~ + (contains($stdout, 'Conditions met')): true diff --git a/e2e/test/lb-with-proxyprotocol-set/create-pods-services.yaml b/e2e/test/lb-with-proxyprotocol-set/create-pods-services.yaml new file mode 100644 index 00000000..80b96d86 --- /dev/null +++ b/e2e/test/lb-with-proxyprotocol-set/create-pods-services.yaml @@ -0,0 +1,54 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: proxyprotocol-set + name: test +spec: + replicas: 1 + selector: + matchLabels: + app: proxyprotocol-set + template: + metadata: + labels: + app: proxyprotocol-set + spec: + containers: + - image: appscode/test-server:2.3 + name: test + ports: + - name: http-1 + containerPort: 80 + protocol: TCP + - name: http-2 + containerPort: 8080 + protocol: TCP + env: + - name: POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name +--- +apiVersion: v1 +kind: Service +metadata: + name: svc-test + labels: + app: proxyprotocol-set +spec: + type: LoadBalancer + selector: + app: proxyprotocol-set + ports: + - name: http-1 + protocol: TCP + port: 80 + targetPort: 80 + - name: http-2 + protocol: TCP + port: 8080 + targetPort: 8080 + sessionAffinity: None diff --git a/e2e/test/route-controller-test/chainsaw-test.yaml b/e2e/test/route-controller-test/chainsaw-test.yaml new file mode 100644 index 00000000..236f13df --- /dev/null +++ b/e2e/test/route-controller-test/chainsaw-test.yaml @@ -0,0 +1,65 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test 
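+# verifies the route controller: each node's podCIDR should be present in the ip_ranges of its Linode's VPC interface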
+metadata: + name: route-controller-test +spec: + bindings: + - name: fwname + value: (join('-', ['ccm-fwtest', env('CLUSTER_NAME')])) + namespace: "route-controller-test" + steps: + - name: Check if CCM is deployed + try: + - assert: + file: ../assert-ccm-resources.yaml + - name: Check if the route controller updated the config for the linode + try: + - script: + content: | + set -e + + if [ -z "$KUBECONFIG" ] || [ -z "$LINODE_TOKEN" ]; then + echo "Error: KUBECONFIG and LINODE_TOKEN environment variables must be set" + exit 1 + fi + + # Get all node names + nodes=$(kubectl get nodes -o jsonpath='{.items[*].metadata.name}') + if [ -z "$nodes" ]; then + echo "Error: No nodes found in cluster" + exit 1 + fi + + # Process each node + for node in $nodes; do + echo "Checking node: $node" + + # Get pod CIDR and instance ID + pod_cidr=$(kubectl get node "$node" -o jsonpath='{.spec.podCIDR}') + instance_id=$(kubectl get node "$node" -o jsonpath='{.spec.providerID}' | sed 's/linode:\/\///') + + echo " Pod CIDR: $pod_cidr" + echo " Instance ID: $instance_id" + + # Get interface details for this config + interfaces=$(curl -s \ + -H "Authorization: Bearer $LINODE_TOKEN" \ + "https://api.linode.com/v4/linode/instances/$instance_id/configs" \ + | jq -r '.data[0].interfaces') + + # Check if pod CIDR is in the VPC interface IP ranges + if echo "$interfaces" | jq -e --arg cidr "$pod_cidr" '.[] | select(.purpose == "vpc") | .ip_ranges[] | select(. == $cidr)' > /dev/null; then + echo "Pod CIDR found in VPC interface configuration" + else + echo "Pod CIDR not found in VPC interface configuration" + echo "Current VPC interface configuration:" + echo "$interfaces" | jq '.[] | select(.purpose == "vpc")' + fi + + echo "---" + done + + check: + ($error == null): true + (contains($stdout, 'Pod CIDR not found in VPC interface configuration')): false diff --git a/e2e/test/scripts/create_cluster.sh b/e2e/test/scripts/create_cluster.sh deleted file mode 100755 index 63f0bbb8..00000000 --- a/e2e/test/scripts/create_cluster.sh +++ /dev/null @@ -1,41 +0,0 @@ -#!/usr/bin/env bash - -set -o errexit -set -o pipefail -set -o nounset - -export LINODE_API_TOKEN="$1" -export CLUSTER_NAME="$2" -export IMAGE="$3" -export K8S_VERSION="$4" - -if [[ -z "$5" ]] -then - export REGION="eu-west" -else - export REGION="$5" -fi - -cat > cluster.tf < k8s.io/api v0.23.17 - k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.23.17 - k8s.io/apimachinery => k8s.io/apimachinery v0.23.17 - k8s.io/apiserver => k8s.io/apiserver v0.23.17 - k8s.io/cli-runtime => k8s.io/cli-runtime v0.23.17 - k8s.io/client-go => k8s.io/client-go v0.23.17 - k8s.io/cloud-provider => k8s.io/cloud-provider v0.23.17 - k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.23.17 - k8s.io/code-generator => k8s.io/code-generator v0.23.17 - k8s.io/component-base => k8s.io/component-base v0.23.17 - k8s.io/cri-api => k8s.io/cri-api v0.23.17 - k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.23.17 - k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.23.17 - k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.23.17 - k8s.io/kube-proxy => k8s.io/kube-proxy v0.23.17 - k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.23.17 - k8s.io/kubectl => k8s.io/kubectl v0.23.17 - k8s.io/kubelet => k8s.io/kubelet v0.23.17 - k8s.io/kubernetes => k8s.io/kubernetes v1.21.0 - k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.23.17 - k8s.io/metrics => k8s.io/metrics v0.23.17 - k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.23.17 + 
k8s.io/apiextensions-apiserver v0.30.2 // indirect + k8s.io/apiserver v0.32.1 // indirect + k8s.io/component-helpers v0.32.1 // indirect + k8s.io/controller-manager v0.32.1 // indirect + k8s.io/kms v0.32.1 // indirect + k8s.io/kube-openapi v0.0.0-20241212222426-2c72e554b1e7 // indirect + sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.1 // indirect + sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.5.0 // indirect + sigs.k8s.io/yaml v1.4.0 // indirect ) diff --git a/go.sum b/go.sum index be8435ef..215873ec 100644 --- a/go.sum +++ b/go.sum @@ -1,1055 +1,509 @@ -cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= -cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= -cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= -cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= -cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= -cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= -cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= -cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= -cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= -cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= -cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= -cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= -cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= -cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= -cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= -cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= -cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= -cloud.google.com/go v0.81.0 h1:at8Tk2zUz63cLPR0JPWm5vp77pEZmzxEQBEfRKn1VV8= -cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= -cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= -cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= -cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= -cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= -cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= -cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= -cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= -cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= -cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= -cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= -cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= -cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= -cloud.google.com/go/pubsub v1.3.1/go.mod 
h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= -cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= -cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= -cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= -cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= -cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= -dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= -github.com/AndreasBriese/bbloom v0.0.0-20190306092124-e2d15f34fcf9/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= -github.com/Azure/go-ansiterm v0.0.0-20210608223527-2377c96fe795/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= -github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8= -github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= -github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= -github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= -github.com/CloudyKit/fastprinter v0.0.0-20170127035650-74b38d55f37a/go.mod h1:EFZQ978U7x8IRnstaskI3IysnWY5Ao3QgZUKOXlsAdw= -github.com/CloudyKit/jet v2.1.3-0.20180809161101-62edd43e4f88+incompatible/go.mod h1:HPYO+50pSWkPoj9Q/eq0aRGByCL6ScRlUmiEX5Zgm+w= -github.com/Joker/hpp v1.0.0/go.mod h1:8x5n+M1Hp5hC0g8okX3sR3vFQwynaX/UgSOM9MeBKzY= -github.com/Joker/jade v1.0.1-0.20190614124447-d475f43051e7/go.mod h1:6E6s8o2AE4KhCrqr6GRJjdC/gNfTdxkIXvuGZZda2VM= -github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= +cel.dev/expr v0.19.1 h1:NciYrtDRIR0lNCnH1LFJegdjspNx9fI59O7TWcua/W4= +cel.dev/expr v0.19.1/go.mod h1:MrpN08Q+lEBs+bGYdLxxHkZoUSsCp0nSKTs0nTymJgw= +github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 h1:bvDV9vkmnHYOMsOr4WLk+Vo07yKIzd94sVoIqshQ4bU= +github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24/go.mod h1:8o94RPi1/7XTJvwPpRSzSUedZrtlirdB3r9Z20bi2f8= +github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0= +github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= +github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI= +github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= +github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs= +github.com/Masterminds/semver/v3 v3.2.1 h1:RN9w6+7QoMeJVGyfmbcgs28Br8cvmnucEXnY0rYXWg0= +github.com/Masterminds/semver/v3 v3.2.1/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ= +github.com/Masterminds/sprig/v3 v3.2.2 h1:17jRggJu518dr3QaafizSXOjKYp94wKfABxUmyxvxX8= +github.com/Masterminds/sprig/v3 v3.2.2/go.mod h1:UoaO7Yp8KlPnJIYWTFkMaqPUYKTfGFPhxNuwnnxkKlk= github.com/NYTimes/gziphandler v1.1.1 h1:ZUDjpQae29j0ryrS0u/B8HZfJBtBQHjqw2rQ2cqUQ3I= github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c= -github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= -github.com/PuerkitoBio/purell v1.1.1 
h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI= -github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= -github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M= -github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= -github.com/Shopify/goreferrer v0.0.0-20181106222321-ec9c9a553398/go.mod h1:a1uqRtAwp2Xwc6WNPJEufxJ7fx3npB4UV/JOLmbu5I0= -github.com/ajg/form v1.5.1/go.mod h1:uL1WgH+h2mgNtvBq0339dVnzXdBETtL2LeUXaIv25UY= -github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= -github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= -github.com/appscode/go v0.0.0-20200323182826-54e98e09185a h1:cZ80NKoLRaW1PVCWXAJE+YFkBAmLZ8BnrJmH0ClY1Gs= -github.com/appscode/go v0.0.0-20200323182826-54e98e09185a/go.mod h1:lIcm8Z6VPuvcw/a3EeOWcG6R3I13iHMLYbtVP7TKufY= -github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= -github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= -github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= -github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= -github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= -github.com/aymerick/raymond v2.0.3-0.20180322193309-b565731e1464+incompatible/go.mod h1:osfaiScAUVup+UC9Nfq76eWqDhXlp+4UYaA8uhTBO6g= -github.com/beevik/ntp v0.2.0/go.mod h1:hIHWr+l3+/clUnF44zdK+CWW7fO8dR5cIylAQ76NRpg= -github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM= -github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8= -github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= -github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= -github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/antlr4-go/antlr/v4 v4.13.1 h1:SqQKkuVZ+zWkMMNkjy5FZe5mr5WURWnlpmOuzYWrPrQ= +github.com/antlr4-go/antlr/v4 v4.13.1/go.mod h1:GKmUxMtwp6ZgGwZSva4eWPC5mS6vUAmOABFgjdkM7Nw= +github.com/appscode/go v0.0.0-20201105063637-5613f3b8169f h1:heDuWjdnY2rJIgLwIQjWPgOc0BUWWX6OGOeB+0t8v/s= +github.com/appscode/go v0.0.0-20201105063637-5613f3b8169f/go.mod h1:piHRpQ9+NTTuV3V98INxjU7o2KlAJMznaxvB6wHKkfU= +github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so= +github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= +github.com/beevik/ntp v0.3.0/go.mod h1:hIHWr+l3+/clUnF44zdK+CWW7fO8dR5cIylAQ76NRpg= github.com/beorn7/perks v1.0.1 
h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
-github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
-github.com/bketelsen/crypt v0.0.4/go.mod h1:aI6NrJ0pMGgvZKL1iVgXLnfIFJtfV+bKCoqOes/6LfM=
-github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ=
-github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
+github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM=
+github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ=
github.com/cenkalti/backoff v2.1.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM=
-github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
-github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
-github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY=
-github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
-github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
-github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
-github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
-github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
-github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
-github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
-github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
-github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8=
+github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
+github.com/census-instrumentation/opencensus-proto v0.4.1 h1:iKLQ0xPNFxR/2hzXZMrBo8f1j86j5WHzznCCQxV/b8g=
+github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw=
+github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
+github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/cilium/cilium v1.16.6 h1:KRQn5knO48ERxB6SusQo02nYmE0NO0qiLlvqhwBTXbI=
+github.com/cilium/cilium v1.16.6/go.mod h1:NnDWQiYmPef24+pX2U/V85uL8eUTJSFUUjMEy41lGPA=
+github.com/cilium/ebpf v0.16.0 h1:+BiEnHL6Z7lXnlGUsXQPPAE7+kenAd4ES8MQ5min0Ok=
+github.com/cilium/ebpf v0.16.0/go.mod h1:L7u2Blt2jMM/vLAVgjxluxtBKlz3/GWjB0dMOEngfwE=
+github.com/cilium/hive v0.0.0-20241213121623-605c1412b9b3 h1:RfmUH1ouzj0LzORYJRhp43e1rlGpx6GNv4NIRUakU2w=
+github.com/cilium/hive v0.0.0-20241213121623-605c1412b9b3/go.mod h1:pI2GJ1n3SLKIQVFrKF7W6A6gb6BQkZ+3Hp4PAEo5SuI=
+github.com/cilium/proxy v0.0.0-20241216122539-268a44ec93e9 h1:3m0eujK8+y8cKqkQsLSulES72gFayNgcaGXlpwc6bKY=
+github.com/cilium/proxy v0.0.0-20241216122539-268a44ec93e9/go.mod h1:1jlssjN+8AsZeex4+7ERavw5vRa/lce/ybVRamfeQSU=
+github.com/cilium/statedb v0.2.4 h1:jCyXGcsiXgpJSfpfRRGKd+TD3U1teeDtOnqCyErsHsI=
+github.com/cilium/statedb v0.2.4/go.mod h1:KPwsudjhZ90zoBguYMtssKpstR74jVKd/D+73PZy+sg=
+github.com/cilium/stream v0.0.0-20240226091623-f979d32855f8 h1:j6VF1s6gz3etRH5ObCr0UUyJblP9cK5fbgkQTz8fTRA=
+github.com/cilium/stream v0.0.0-20240226091623-f979d32855f8/go.mod h1:/e83AwqvNKpyg4n3C41qmnmj1x2G9DwzI+jb7GkF4lI=
+github.com/cncf/xds/go v0.0.0-20241213214725-57cfbe6fad57 h1:put7Je9ZyxbHtwr7IqGrW4LLVUupJQ2gbsDshKISSgU=
+github.com/cncf/xds/go v0.0.0-20241213214725-57cfbe6fad57/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8=
github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0/go.mod h1:4Zcjuz89kmFXt9morQgcfYZAYZ5n8WHjt81YYWIwtTM=
-github.com/codeskyblue/go-sh v0.0.0-20190412065543-76bd3d59ff27/go.mod h1:VQx0hjo2oUeQkQUET7wRwradO6f+fN5jzXgB/zROxxE=
-github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
-github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk=
-github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
-github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM=
-github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
-github.com/coreos/go-systemd/v22 v22.3.2 h1:D9/bQk5vlXQFZ6Kwuu6zaiXJ9oTPe68++AzAJc1DzSI=
-github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
-github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
-github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
+github.com/codeskyblue/go-sh v0.0.0-20200712050446-30169cf553fe/go.mod h1:VQx0hjo2oUeQkQUET7wRwradO6f+fN5jzXgB/zROxxE=
+github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4=
+github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec=
+github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs=
+github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
+github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
-github.com/creack/pty v1.1.11 h1:07n33Z8lZxZ2qwegKbObQohDhXDQxiMMz1NOUGYlesw=
-github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
+github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY=
+github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/dgraph-io/badger v1.6.0/go.mod h1:zwt7syl517jmP8s94KqSxTlM6IMsdhYy6psNgSztDR4=
-github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
-github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
-github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
-github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo=
-github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
-github.com/eknkc/amber v0.0.0-20171010120322-cdade1c07385/go.mod h1:0vRUJqYpeSZifjYj7uP3BG/gKcuzL9xWVV/Y+cK33KM=
-github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
-github.com/emicklei/go-restful v2.9.5+incompatible h1:spTtZBk5DYEvbxMVutUuTyh1Ao2r4iyvLdACqsl/Ljk=
-github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
-github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
-github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
-github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
-github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po=
-github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
-github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
-github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ=
-github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
-github.com/etcd-io/bbolt v1.3.3/go.mod h1:ZF2nL25h33cCyBtcyWeZ2/I3HQOfTP+0PIEvHjkjCrw=
-github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84=
-github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
-github.com/fasthttp-contrib/websocket v0.0.0-20160511215533-1f3b11f56072/go.mod h1:duJ4Jxv5lDcvg4QuQr0oowTf7dz4/CR8NtyCooz9HL8=
+github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
+github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
+github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
+github.com/emicklei/go-restful/v3 v3.12.1 h1:PJMDIM/ak7btuL8Ex0iYET9hxM3CI2sjZtzpL63nKAU=
+github.com/emicklei/go-restful/v3 v3.12.1/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
+github.com/envoyproxy/protoc-gen-validate v1.1.0 h1:tntQDh69XqOCOZsDz0lVJQez/2L6Uu2PdjCQwWCJ3bM=
+github.com/envoyproxy/protoc-gen-validate v1.1.0/go.mod h1:sXRDRVmzEbkM7CVcM06s9shE/m23dg3wzjl0UWqJ2q4=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
-github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M=
-github.com/felixge/httpsnoop v1.0.1 h1:lvB5Jl89CsZtGIWuTcDM1E/vkVs49/Ml7JJe07l8SPQ=
-github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
-github.com/flosch/pongo2 v0.0.0-20190707114632-bbf5a6c351f4/go.mod h1:T9YF2M40nIgbVgp3rreNmTged+9HrbNTIQf1PsaIiTA=
-github.com/form3tech-oss/jwt-go v3.2.3+incompatible h1:7ZaBxOI7TMoYBfyA3cQHErNNyAWIKUMIwqxEtgHOs5c=
+github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
+github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
+github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8=
+github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
-github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4=
-github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
-github.com/gavv/httpexpect v2.0.0+incompatible/go.mod h1:x+9tiU1YnrOvnB725RkpoLv1M62hOWzwo5OXotisrKc=
-github.com/getkin/kin-openapi v0.76.0/go.mod h1:660oXbgy5JFMKreazJaQTw7o+X00qeSyhcnluiMv+Xg=
-github.com/getsentry/sentry-go v0.4.0 h1:WqRI2/7EiALbdG9qGB47c0Aks1tdznG5DZd6GSQ1y/8=
-github.com/getsentry/sentry-go v0.4.0/go.mod h1:xkGcb82SipKQloDNa5b7hTV4VdEyc2bhwd1/UczP52k=
-github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
-github.com/gin-contrib/sse v0.0.0-20190301062529-5545eab6dad3/go.mod h1:VJ0WA2NBN22VlZ2dKZQPAPnyWw5XTlK1KymzLKsr59s=
-github.com/gin-gonic/gin v1.4.0/go.mod h1:OW2EZn3DO8Ln9oIKOvM++LBO+5UPHJJDH72/q/3rZdM=
-github.com/go-check/check v0.0.0-20180628173108-788fd7840127/go.mod h1:9ES+weclKsC9YodN5RgxqK/VD9HM9JsCSh7rNhMZE98=
-github.com/go-errors/errors v1.0.1 h1:LUHzmkK3GUKUrL/1gfBUxAHzcev3apQlezX/+O7ma6w=
-github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q=
-github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
-github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
-github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
-github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
-github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
-github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
-github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
-github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
-github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
-github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
-github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
-github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY=
-github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
-github.com/go-martini/martini v0.0.0-20170121215854-22fa46961aab/go.mod h1:/P9AEU963A2AYjv4d1V5eVL1CQbEJq6aCNHDDjibzu8=
-github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
-github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY=
-github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
-github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8=
-github.com/go-openapi/jsonreference v0.19.5 h1:1WJP/wi4OjB4iV8KVbH73rQaoialJrqv8gitZLxGLtM=
-github.com/go-openapi/jsonreference v0.19.5/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE69AqPYEJeo/TWfEeg=
-github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
-github.com/go-openapi/swag v0.19.14 h1:gm3vOOXfiuw5i9p5N9xJvfjvuofpyvLA9Wr6QfK5Fng=
-github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
-github.com/go-resty/resty/v2 v2.9.1 h1:PIgGx4VrHvag0juCJ4dDv3MiFRlDmP0vicBucwf+gLM=
-github.com/go-resty/resty/v2 v2.9.1/go.mod h1:4/GYJVjh9nhkhGR6AUNW3XhpDYNUr+Uvy9gV/VGZIy4=
-github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
-github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee/go.mod h1:L0fX3K22YWvt/FAX9NnzrNzcI4wNYi9Yku4O0LKYflo=
-github.com/gobwas/pool v0.2.0/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw=
-github.com/gobwas/ws v1.0.2/go.mod h1:szmBTxLgaFppYjEmNtny/v3w89xOydFnnZMcgRRu/EM=
+github.com/fsnotify/fsnotify v1.8.0 h1:dAwr6QBTBZIkG8roQaJjGof0pp0EeF+tNV7YBP3F/8M=
+github.com/fsnotify/fsnotify v1.8.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0=
+github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E=
+github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ=
+github.com/getsentry/sentry-go v0.31.1 h1:ELVc0h7gwyhnXHDouXkhqTFSO5oslsRDk0++eyE0KJ4=
+github.com/getsentry/sentry-go v0.31.1/go.mod h1:CYNcMMz73YigoHljQRG+qPF+eMq8gG72XcGN/p71BAY=
+github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA=
+github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og=
+github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
+github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
+github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
+github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
+github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ=
+github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg=
+github.com/go-openapi/analysis v0.23.0 h1:aGday7OWupfMs+LbmLZG4k0MYXIANxcuBTYUC03zFCU=
+github.com/go-openapi/analysis v0.23.0/go.mod h1:9mz9ZWaSlV8TvjQHLl2mUW2PbZtemkE8yA5v22ohupo=
+github.com/go-openapi/errors v0.22.0 h1:c4xY/OLxUBSTiepAg3j/MHuAv5mJhnf53LLMWFB+u/w=
+github.com/go-openapi/errors v0.22.0/go.mod h1:J3DmZScxCDufmIMsdOuDHxJbdOGC0xtUynjIx092vXE=
+github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ=
+github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY=
+github.com/go-openapi/jsonreference v0.21.0 h1:Rs+Y7hSXT83Jacb7kFyjn4ijOuVGSvOdF2+tg1TRrwQ=
+github.com/go-openapi/jsonreference v0.21.0/go.mod h1:LmZmgsrTkVg9LG4EaHeY8cBDslNPMo06cago5JNLkm4=
+github.com/go-openapi/loads v0.22.0 h1:ECPGd4jX1U6NApCGG1We+uEozOAvXvJSF4nnwHZ8Aco=
+github.com/go-openapi/loads v0.22.0/go.mod h1:yLsaTCS92mnSAZX5WWoxszLj0u+Ojl+Zs5Stn1oF+rs=
+github.com/go-openapi/runtime v0.28.0 h1:gpPPmWSNGo214l6n8hzdXYhPuJcGtziTOgUpvsFWGIQ=
+github.com/go-openapi/runtime v0.28.0/go.mod h1:QN7OzcS+XuYmkQLw05akXk0jRH/eZ3kb18+1KwW9gyc=
+github.com/go-openapi/spec v0.21.0 h1:LTVzPc3p/RzRnkQqLRndbAzjY0d0BCL72A6j3CdL9ZY=
+github.com/go-openapi/spec v0.21.0/go.mod h1:78u6VdPw81XU44qEWGhtr982gJ5BWg2c0I5XwVMotYk=
+github.com/go-openapi/strfmt v0.23.0 h1:nlUS6BCqcnAk0pyhi9Y+kdDVZdZMHfEKQiS4HaMgO/c=
+github.com/go-openapi/strfmt v0.23.0/go.mod h1:NrtIpfKtWIygRkKVsxh7XQMDQW5HKQl6S5ik2elW+K4=
+github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE=
+github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ=
+github.com/go-openapi/validate v0.24.0 h1:LdfDKwNbpB6Vn40xhTdNZAnfLECL81w+VX3BumrGD58=
+github.com/go-openapi/validate v0.24.0/go.mod h1:iyeX1sEufmv3nPbBdX3ieNviWnOZaJ1+zquzJEf2BAQ=
+github.com/go-quicktest/qt v1.101.0 h1:O1K29Txy5P2OK0dGo59b7b0LR6wKfIhttaAhHUyn7eI=
+github.com/go-quicktest/qt v1.101.0/go.mod h1:14Bz/f7NwaXPtdYEgzsx46kqSxVwTbzVZsDC26tQJow=
+github.com/go-resty/resty/v2 v2.16.3 h1:zacNT7lt4b8M/io2Ahj6yPypL7bqx9n1iprfQuodV+E=
+github.com/go-resty/resty/v2 v2.16.3/go.mod h1:hkJtXbA2iKHzJheXYvQ8snQES5ZLGKMwQ07xAwp/fiA=
+github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
+github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
-github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
+github.com/gojuno/minimock/v3 v3.0.10 h1:0UbfgdLHaNRPHWF/RFYPkwxV2KI+SE4tR0dDSFMD7+A=
+github.com/gojuno/minimock/v3 v3.0.10/go.mod h1:CFXcUJYnBe+1QuNzm+WmdPYtvi/+7zQcPcyQGsbcIXg=
+github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg=
+github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
-github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
-github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
-github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
-github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
-github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
-github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
-github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
-github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
-github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
-github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
-github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
-github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4=
-github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8=
+github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 h1:f+oWsMOmNPc8JmEHVZIycC7hBoQxHH9pNKQORJNozsQ=
+github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8/go.mod h1:wcDNUvekVysuuOpQKo3191zZyTpiI6se1N1ULghS0sw=
github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc=
github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
-github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
-github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
-github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
-github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
-github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
-github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
-github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
-github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
-github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
-github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
-github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
-github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM=
-github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
-github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
-github.com/gomodule/redigo v1.7.1-0.20190724094224-574c33c3df38/go.mod h1:B4C85qUVwatsJoIUNIfCRsp7qO0iAmpGFZ4EELWSbC4=
-github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
-github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
-github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4=
-github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
-github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
-github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
-github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
+github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
+github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg=
+github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4=
+github.com/google/cel-go v0.22.1 h1:AfVXx3chM2qwoSbM7Da8g8hX8OVSkBFwX+rz2+PcK40=
+github.com/google/cel-go v0.22.1/go.mod h1:BuznPXXfQDpXKWQ9sPW3TzlAJN5zzFe+i9tIs0yC4s8=
+github.com/google/gnostic-models v0.6.9 h1:MU/8wDLif2qCXZmzncUQ/BOfxWfthHi63KqpoNbWqVw=
+github.com/google/gnostic-models v0.6.9/go.mod h1:CiWsm0s6BSQd1hRn8/QmxqB6BesYcbSZxsz9b0KuDBw=
github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
+github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8=
+github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
-github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g=
-github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
-github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
-github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
-github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
-github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
-github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
-github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
-github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
-github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
-github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
-github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
-github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
+github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
+github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
+github.com/google/gopacket v1.1.19 h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF8=
+github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo=
+github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db h1:097atOisP2aRj7vFgYQBbFN4U4JNXUNYpxael3UzMyo=
+github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144=
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y=
-github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
-github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
-github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU=
-github.com/googleapis/gnostic v0.5.5 h1:9fHAtK0uDfpveeqqo1hkEZJcFvYXAiCN3UutL8F9xHw=
-github.com/googleapis/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA=
+github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
+github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
-github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
-github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
-github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc=
+github.com/gorilla/websocket v1.5.1 h1:gmztn0JnHVt9JZquRuzLw3g4wouNVzKL15iLr/zn/QY=
+github.com/gorilla/websocket v1.5.1/go.mod h1:x3kM2JMyaluk02fnUJpQuwD2dCS5NDG2ZHL0uE0tcaY=
github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw=
+github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y=
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho=
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo=
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
-github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q=
-github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
-github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
-github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
-github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
-github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
-github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
-github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU=
-github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU=
-github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4=
-github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
-github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
-github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
-github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90=
-github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
-github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
-github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
-github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
-github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ=
-github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
-github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0 h1:TmHmbvxPmaegwhDubVz0lICL0J5Ka2vwTzhoePEXsGE=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0/go.mod h1:qztMSjm835F2bXf+5HKAPIS5qsmQDqZna/PgVt4rWtI=
+github.com/hashicorp/hcl v1.0.1-vault-7 h1:ag5OxFVy3QYTFTJODRzTKVZ6xvdfLLCA1cy/Y6xGI0I=
+github.com/hashicorp/hcl v1.0.1-vault-7/go.mod h1:XYhtn6ijBSAj6n4YqAaf7RBPS4I06AItNorpy+MoQNM=
+github.com/hexdigest/gowrap v1.4.1 h1:gZS/XE6ClEHskmhu1bNd0d4wWYcuDzUNsTr7eXC9TYQ=
+github.com/hexdigest/gowrap v1.4.1/go.mod h1:s+1hE6qakgdaaLqgdwPAj5qKYVBCSbPJhEbx+I1ef/Q=
github.com/howeyc/gopass v0.0.0-20170109162249-bf9dde6d0d2c/go.mod h1:lADxMC39cJJqL93Duh1xhAs4I2Zs8mKS89XWXFGp9cs=
-github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
-github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
-github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
-github.com/imdario/mergo v0.3.5 h1:JboBksRwiiAJWvIYJVo46AfV+IAIKZpfrSzVKj42R4Q=
-github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
-github.com/imkira/go-interpol v1.1.0/go.mod h1:z0h2/2T3XF8kyEPpRgJ3kmNv+C43p+I/CoI+jC3w2iA=
-github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
+github.com/huandu/xstrings v1.3.1/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE=
+github.com/huandu/xstrings v1.3.2 h1:L18LIDzqlW6xN2rEkpdV8+oL/IXWJ1APd+vsdYy4Wdw=
+github.com/huandu/xstrings v1.3.2/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE=
+github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
+github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4=
+github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
-github.com/iris-contrib/blackfriday v2.0.0+incompatible/go.mod h1:UzZ2bDEoaSGPbkg6SAB4att1aAwTmVIx/5gCVqeyUdI=
-github.com/iris-contrib/go.uuid v2.0.0+incompatible/go.mod h1:iz2lgM/1UnEf1kP0L/+fafWORmlnuysV2EMP8MW+qe0=
-github.com/iris-contrib/i18n v0.0.0-20171121225848-987a633949d0/go.mod h1:pMCz62A0xJL6I+umB2YTlFRwWXaDFA0jy+5HzGiJjqI=
-github.com/iris-contrib/schema v0.0.1/go.mod h1:urYA3uvUNG1TIIjOSCzHr9/LmbQo8LrOcOqfqxa4hXw=
-github.com/jonboulle/clockwork v0.2.2 h1:UOGuzwb1PwsrDAObMuhUnj0p5ULPj8V/xJ7Kx9qUBdQ=
+github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
+github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
+github.com/jarcoal/httpmock v1.3.1 h1:iUx3whfZWVf3jT01hQTO/Eo5sAYtB2/rqaUuOtpInww=
+github.com/jarcoal/httpmock v1.3.1/go.mod h1:3yb8rc4BI7TCBhFY8ng0gjuLKJNquuDNiPaZjnENuYg=
+github.com/jonboulle/clockwork v0.4.0 h1:p4Cf1aMWXnXAUh8lVfewRBx1zaTSYKrKMF2g3ST4RZ4=
+github.com/jonboulle/clockwork v0.4.0/go.mod h1:xgRqUGwRcjKCO1vbZUEtSLrqKoPSsUpK7fnezOII0kc=
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
-github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
+github.com/josharian/native v1.1.0 h1:uuaP0hAbW7Y4l0ZRQ6C9zfb7Mg1mbFKry/xzDAfmtLA=
+github.com/josharian/native v1.1.0/go.mod h1:7X/raswPFr05uY3HiLlYeyQntB6OO7E/d2Cu7qoaN2w=
+github.com/jsimonetti/rtnetlink/v2 v2.0.1 h1:xda7qaHDSVOsADNouv7ukSuicKZO7GgVUCXxpaIEIlM=
+github.com/jsimonetti/rtnetlink/v2 v2.0.1/go.mod h1:7MoNYNbb3UaDHtF8udiJo/RH6VsTKP1pqKLUTVCvToE=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
-github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
-github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
-github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
-github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
-github.com/juju/errors v0.0.0-20181118221551-089d3ea4e4d5/go.mod h1:W54LbzXuIE0boCoNJfwqpmkKJ1O4TCTZMetAt6jGk7Q=
-github.com/juju/loggo v0.0.0-20180524022052-584905176618/go.mod h1:vgyd7OREkbtVEN/8IXZe5Ooef3LQePvuBm9UWj6ZL8U=
-github.com/juju/testing v0.0.0-20180920084828-472a3e8b2073/go.mod h1:63prj8cnj0tU0S9OHjGJn+b1h0ZghCndfnbQolrYTwA=
-github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
-github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
-github.com/k0kubun/colorstring v0.0.0-20150214042306-9440f1994b88/go.mod h1:3w7q1U84EfirKl04SVQ/s7nPm1ZPhiXd34z40TNz36k=
-github.com/kataras/golog v0.0.9/go.mod h1:12HJgwBIZFNGL0EJnMRhmvGA0PQGx8VFwrZtM4CqbAk=
-github.com/kataras/iris/v12 v12.0.1/go.mod h1:udK4vLQKkdDqMGJJVd/msuMtN6hpYJhg/lSzuxjhO+U=
-github.com/kataras/neffos v0.0.10/go.mod h1:ZYmJC07hQPW67eKuzlfY7SO3bC0mw83A3j6im82hfqw=
-github.com/kataras/pio v0.0.0-20190103105442-ea782b38602d/go.mod h1:NV88laa9UiiDuX9AhMbDPkGYSPugBOV6yTZB1l2K9Z0=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
-github.com/klauspost/compress v1.8.2/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
-github.com/klauspost/compress v1.9.0/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
-github.com/klauspost/cpuid v1.2.1/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
-github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
-github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
-github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
-github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
-github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
-github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
-github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
-github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
+github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc=
+github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0=
+github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
+github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
-github.com/labstack/echo/v4 v4.1.11/go.mod h1:i541M3Fj6f76NZtHSj7TXnyM8n2gaodfvfxNnFqi74g=
-github.com/labstack/gommon v0.3.0/go.mod h1:MULnywXg0yavhxWKc+lOruYdAhDwPK9wf0OL7NoOu+k=
-github.com/linode/linodego v1.26.0 h1:2tOZ3Wxn4YvGBRgZi3Vz6dab+L16XUntJ9sJxh3ZBio=
-github.com/linode/linodego v1.26.0/go.mod h1:kD7Bf1piWg/AXb9TA0ThAVwzR+GPf6r2PvbTbVk7PMA=
-github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
-github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60=
-github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
-github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
-github.com/mailru/easyjson v0.7.6 h1:8yTIVnZgCoiM1TgqoeTl+LfU5Jg6/xL3QhGQnimLYnA=
-github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
-github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
+github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
+github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
+github.com/linode/linodego v1.47.0 h1:6MFNCyzWbr8Rhl4r7d5DwZLwxvFIsM4ARH6W0KS/R0U=
+github.com/linode/linodego v1.47.0/go.mod h1:vyklQRzZUWhFVBZdYx4dcYJU/gG9yKB9VUcUs6ub0Lk=
+github.com/mackerelio/go-osstat v0.2.5 h1:+MqTbZUhoIt4m8qzkVoXUJg1EuifwlAJSk4Yl2GXh+o=
+github.com/mackerelio/go-osstat v0.2.5/go.mod h1:atxwWF+POUZcdtR1wnsUcQxTytoHG4uhl2AKKzrOajY=
+github.com/magiconair/properties v1.8.9 h1:nWcCbLq1N2v/cpNsy5WvQ37Fb+YElfq20WJ/a8RkpQM=
+github.com/magiconair/properties v1.8.9/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0=
+github.com/mailru/easyjson v0.9.0 h1:PrnmzHw7262yW8sTBwxi1PdJA3Iw/EKBa8psRf7d9a4=
+github.com/mailru/easyjson v0.9.0/go.mod h1:1+xMtQp2MRNVL/V1bOzuP3aP8VNwRW55fQUto+XFtTU=
github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
-github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
-github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
-github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ=
-github.com/mattn/goveralls v0.0.2/go.mod h1:8d1ZMHsd7fW6IRPKQh46F2WRpyib5/X4FOpevwGNQEw=
-github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
-github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI=
-github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
-github.com/mediocregopher/mediocre-go-lib v0.0.0-20181029021733-cb65787f37ed/go.mod h1:dSsfyI2zABAdhcbvkXqgxOxrCsbYeHCPgrZkku60dSg=
-github.com/mediocregopher/radix/v3 v3.3.0/go.mod h1:EmfVyvspXz1uZEyPBMyGK+kjWiKQGvsUt6O3Pj+LDCQ=
-github.com/microcosm-cc/bluemonday v1.0.2/go.mod h1:iVP4YcDBq+n/5fb23BhYFvIMq/leAFZyRl6bYmGDlGc=
-github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
-github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
-github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
-github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
-github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
-github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg=
-github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY=
-github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
-github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
-github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
-github.com/moby/term v0.0.0-20210610120745-9d4ed1856297 h1:yH0SvLzcbZxcJXho2yh7CqdENGMQe73Cw3woZBpPli0=
-github.com/moby/term v0.0.0-20210610120745-9d4ed1856297/go.mod h1:vgPCkQMyxTZ7IDy8SXRufE172gr8+K/JE/7hHFxHW3A=
+github.com/mdlayher/netlink v1.7.2 h1:/UtM3ofJap7Vl4QWCPDGXY8d3GIY2UGSDbK+QWmY8/g=
+github.com/mdlayher/netlink v1.7.2/go.mod h1:xraEF7uJbxLhc5fpHL4cPe221LI2bdttWlU+ZGLfQSw=
+github.com/mdlayher/socket v0.4.1 h1:eM9y2/jlbs1M615oshPQOHZzj6R6wMT7bX5NPiQvn2U=
+github.com/mdlayher/socket v0.4.1/go.mod h1:cAqeGjoufqdxWkD7DkpyS+wcefOtmu5OQ8KuoJGIReA=
+github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw=
+github.com/mitchellh/copystructure v1.1.2 h1:Th2TIvG1+6ma3e/0/bopBKohOTY7s4dA8V2q4EUcBJ0=
+github.com/mitchellh/copystructure v1.1.2/go.mod h1:EBArHfARyrSWO/+Wyr9zwEkc6XMFB9XyNgFNmRkZZU4=
+github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
+github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
+github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
+github.com/mitchellh/reflectwalk v1.0.1 h1:FVzMWA5RllMAKIdUSC8mdWo3XtwoecrH79BY70sEEpE=
+github.com/mitchellh/reflectwalk v1.0.1/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
+github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0=
+github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
-github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/moul/http2curl v1.0.0/go.mod h1:8UbvGypXm98wA/IqH45anm5Y2Z6ep6O31QGOAZ3H0fQ=
-github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
-github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
-github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
-github.com/nats-io/nats.go v1.8.1/go.mod h1:BrFz9vVn0fU3AcH9Vn4Kd7W0NpJ651tD5omQ3M8LwxM=
-github.com/nats-io/nkeys v0.0.2/go.mod h1:dab7URMsZm6Z/jp9Z5UGa87Uutgc2mVpXLC4B7TDb/4=
-github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c=
-github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs=
-github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
-github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78=
-github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
-github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
-github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
-github.com/onsi/ginkgo v1.14.0 h1:2mOpI4JVVPBN+WQRa0WKH2eXR+Ey+uK4n7Zj0aYpIQA=
-github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
-github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
-github.com/onsi/gomega v1.10.1 h1:o0+MgICZLuZ7xjH7Vx6zS/zcu93/BEp1VwkIW1mEXCE=
-github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
-github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
-github.com/pelletier/go-toml v1.9.3/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
+github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4=
+github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
+github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
+github.com/onsi/ginkgo/v2 v2.21.0 h1:7rg/4f3rB88pb5obDgNZrNHrQ4e6WpjonchcpuBRnZM=
+github.com/onsi/ginkgo/v2 v2.21.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo=
+github.com/onsi/gomega v1.35.1 h1:Cwbd75ZBPxFSuZ6T+rN/WCb/gOc6YgFBXLlZLhC7Ds4=
+github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog=
+github.com/opentracing/opentracing-go v1.2.1-0.20220228012449-10b1cf09e00b h1:FfH+VrHHk6Lxt9HdVS0PXzSXFyS2NbZKXv33FYPol0A=
+github.com/opentracing/opentracing-go v1.2.1-0.20220228012449-10b1cf09e00b/go.mod h1:AC62GU6hc0BrNm+9RK9VSiwa/EUe1bkIeFORAMcHvJU=
+github.com/pelletier/go-toml/v2 v2.2.3 h1:YmeHyLY8mFWbdkNWwpr+qIL2bEqT0o95WSdkNHvL12M=
+github.com/pelletier/go-toml/v2 v2.2.3/go.mod h1:MfCQTFTvCcUyyvvwm1+G6H/jORL20Xlb6rzQu9GuUkc=
+github.com/petermattis/goid v0.0.0-20240813172612-4fcff4a6cae7/go.mod h1:pxMtw7cyUw6B2bRH0ZBANSPg+AoSud1I1iyJHI69jH4=
+github.com/petermattis/goid v0.0.0-20241211131331-93ee7e083c43 h1:ah1dvbqPMN5+ocrg/ZSgZ6k8bOk+kcZQ7fnyx6UvOm4=
+github.com/petermattis/goid v0.0.0-20241211131331-93ee7e083c43/go.mod h1:pxMtw7cyUw6B2bRH0ZBANSPg+AoSud1I1iyJHI69jH4=
github.com/pingcap/errors v0.11.4 h1:lFuQV/oaUMGcD2tqt+01ROSmJs75VG1ToEOkZIZ4nE4=
github.com/pingcap/errors v0.11.4/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8=
-github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
-github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI=
-github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
-github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
-github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
-github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
-github.com/prometheus/client_golang v1.11.0 h1:HNkLOAEQMIDv/K+04rukrLx6ch7msSRwf3/SASFAGtQ=
-github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
-github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
-github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M=
-github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
-github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
-github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
-github.com/prometheus/common v0.28.0 h1:vGVfV9KrDTvWt5boZO0I19g2E3CsWfpPPKZM9dt3mEw=
-github.com/prometheus/common v0.28.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
-github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
-github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
-github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
-github.com/prometheus/procfs v0.6.0 h1:mxy4L2jP6qMonqmq+aTtOx1ifVWUgG/TAmntgbh3xv4=
-github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
-github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
-github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
-github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
-github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
-github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
-github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
-github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
-github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
-github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
-github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
-github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
-github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
-github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE=
+github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
+github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y=
+github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE=
+github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
+github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
+github.com/prometheus/common v0.61.0 h1:3gv/GThfX0cV2lpO7gkTUwZru38mxevy90Bj8YFSRQQ=
+github.com/prometheus/common v0.61.0/go.mod h1:zr29OCN/2BsJRaFwG8QOBr41D6kkchKbpeNH7pAjb/s=
+github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
+github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
+github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
+github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
+github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
+github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
+github.com/sagikazarmark/locafero v0.6.0 h1:ON7AQg37yzcRPU69mt7gwhFEBwxI6P9T4Qu3N51bwOk=
+github.com/sagikazarmark/locafero v0.6.0/go.mod h1:77OmuIc6VTraTXKXIs/uvUxKGUXjE1GbemJYHqdNjX0=
+github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE=
+github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ=
+github.com/sasha-s/go-deadlock v0.3.5 h1:tNCOEEDG6tBqrNDOX35j/7hL5FcFViG6awUGROb2NsU=
+github.com/sasha-s/go-deadlock v0.3.5/go.mod h1:bugP6EGbdGYObIlx7pUZtWqlvo8k9H6vCBBsiChJQ5U=
+github.com/shopspring/decimal v1.2.0 h1:abSATXmQEYyShuxI4/vyW3tV1MrKAJzCZ/0zLUXYbsQ=
+github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o=
+github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
+github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
github.com/smartystreets/goconvey v0.0.0-20190731233626-505e41936337/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
-github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
github.com/soheilhy/cmux v0.1.5 h1:jjzc5WVemNEDTLwv9tlmemhC73tI08BNOIGwBOo10Js=
-github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
-github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
-github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
-github.com/spf13/afero v1.6.0 h1:xoax2sJ2DT8S8xA2paPFjDCScCNeWsg75VG0DLRreiY=
-github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I=
-github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
+github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0=
+github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo=
+github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0=
+github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8=
+github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY=
github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
+github.com/spf13/cast v1.7.1 h1:cuNEagBQEHWN1FnbGEjCXL2szYEXqfJPbP2HNUaca9Y=
+github.com/spf13/cast v1.7.1/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo=
github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
-github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU=
-github.com/spf13/cobra v1.2.1 h1:+KmjbUw1hriSNMF55oPrkZcb27aECyrj8V2ytv7kWDw=
-github.com/spf13/cobra v1.2.1/go.mod h1:ExllRjgxM/piMAM+3tAZvg8fsklGAf3tPfi+i8t68Nk=
-github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
-github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo=
+github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM=
+github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y=
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
-github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
-github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s=
-github.com/spf13/viper v1.8.1/go.mod h1:o0Pch8wJ9BVSWGQMbra6iw0oQ5oktSIBaujf1rJH9Ns=
-github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8=
+github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o=
+github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
+github.com/spf13/viper v1.19.0 h1:RWq5SEjt8o25SROyN3z2OrDB9l7RPd3lwTWU8EcEdcI=
+github.com/spf13/viper v1.19.0/go.mod h1:GQUN9bilAbhU/jgc1bKs99f/suXKeUMct8Adx5+Ntkg=
+github.com/stoewer/go-strcase v1.3.0 h1:g0eASXYtp+yvN9fK8sH94oCIk0fau9uV1/ZdJ0AVEzs=
+github.com/stoewer/go-strcase v1.3.0/go.mod h1:fAH5hQ5pehh+j3nZfvwdk2RgEgQjAoM8wodgtPmh1xo=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
+github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
-github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
-github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
-github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
-github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
-github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
-github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802 h1:uruHq4dN7GR16kFc5fp3d1RIYzJW5onx8Ybykw2YQFA=
-github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=
-github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw=
-github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
-github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY=
-github.com/urfave/negroni v1.0.0/go.mod h1:Meg73S6kFm/4PpbYdq35yYWoCZ9mS/YSx+lKnmiohz4=
-github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
-github.com/valyala/fasthttp v1.6.0/go.mod h1:FstJa9V+Pj9vQ7OJie2qMHdwemEDaDiSdBnvPM1Su9w=
-github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8=
-github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a/go.mod h1:v3UYOV9WzVtRmSR+PDvWpU/qWl4Wa5LApYYX4ZtKbio=
-github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
-github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ=
-github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y=
-github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8=
-github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
-github.com/yalp/jsonpath v0.0.0-20180802001716-5cc68e5049a0/go.mod h1:/LWChgwKmvncFJFHJ7Gvn9wZArjbV5/FppcK2fKk/tI=
-github.com/yudai/gojsondiff v1.0.0/go.mod h1:AY32+k2cwILAkW1fbgxQ5mUmMiZFgLIV+FBNExI05xg=
-github.com/yudai/golcs v0.0.0-20170316035057-ecda9a501e82/go.mod h1:lgjkn3NuSvDfVJdfcVVdX+jpBxNmX4rDAzaS45IcYoM=
-github.com/yudai/pp v2.0.1+incompatible/go.mod h1:PuxR/8QJ7cyCkFp/aUDS+JY727OFEZkTdatxwunjIkc=
-github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
+github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
+github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
+github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
+github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8=
+github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU=
+github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75 h1:6fotK7otjonDflCTK0BCfls4SPy3NcCVb5dqqmbRknE=
+github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75/go.mod h1:KO6IkyS8Y3j8OdNO85qEYBsRPuteD+YciPomcXdrMnk=
+github.com/vishvananda/netlink v1.3.1-0.20241022031324-976bd8de7d81 h1:9fkQcQYvtTr9ayFXuMfDMVuDt4+BYG9FwsGLnrBde0M=
+github.com/vishvananda/netlink v1.3.1-0.20241022031324-976bd8de7d81/go.mod h1:i6NetklAujEcC6fK0JPjT8qSwWyO0HLn4UKG+hGqeJs=
+github.com/vishvananda/netns v0.0.4/go.mod h1:SpkAiCQRtJ6TvvxPnOSyH3BMl6unz3xZlaprSwhNNJM=
+github.com/vishvananda/netns v0.0.5 h1:DfiHV+j8bA32MFM7bfEunvT8IAqQ/NzSJHtcmW5zdEY=
+github.com/vishvananda/netns v0.0.5/go.mod h1:SpkAiCQRtJ6TvvxPnOSyH3BMl6unz3xZlaprSwhNNJM=
+github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
+github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
+github.com/xiang90/probing v0.0.0-20221125231312-a49e3df8f510 h1:S2dVYn90KE98chqDkyE9Z4N61UnQd+KOfgp5Iu53llk=
+github.com/xiang90/probing v0.0.0-20221125231312-a49e3df8f510/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
-github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
-github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
-go.etcd.io/bbolt v1.3.6 h1:/ecaJf0sk1l4l6V4awd65v2C3ILy7MSj+s/x1ADCIMU=
-go.etcd.io/etcd/api/v3 v3.5.0 h1:GsV3S+OfZEOCNXdtNkBSR7kgLobAa/SO6tCxRa0GAYw=
-go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs=
-go.etcd.io/etcd/client/pkg/v3 v3.5.0 h1:2aQv6F436YnN7I4VbI8PPYrBhu+SmrTaADcf8Mi/6PU=
-go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g=
-go.etcd.io/etcd/client/v2 v2.305.0 h1:ftQ0nOOHMcbMS3KIaDQ0g5Qcd6bhaBrQT6b89DfwLTs=
-go.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ=
-go.etcd.io/etcd/client/v3 v3.5.0 h1:62Eh0XOro+rDwkrypAGDfgmNh5Joq+z+W9HZdlXMzek=
-go.etcd.io/etcd/client/v3 v3.5.0/go.mod h1:AIKXXVX/DQXtfTEqBryiLTUXwON+GuvO6Z7lLS/oTh0=
-go.etcd.io/etcd/pkg/v3 v3.5.0 h1:ntrg6vvKRW26JRmHTE0iNlDgYK6JX3hg/4cD62X0ixk=
-go.etcd.io/etcd/raft/v3 v3.5.0 h1:kw2TmO3yFTgE+F0mdKkG7xMxkit2duBDa2Hu6D/HMlw=
-go.etcd.io/etcd/server/v3 v3.5.0 h1:jk8D/lwGEDlQU9kZXUFMSANkE22Sg5+mW27ip8xcF9E=
-go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
-go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
-go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
-go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
-go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
-go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
-go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E=
-go.opentelemetry.io/contrib v0.20.0 h1:ubFQUn0VCZ0gPwIoJfBJVpeBlyRMxu8Mm/huKWYd9p0=
-go.opentelemetry.io/contrib v0.20.0/go.mod h1:G/EtFaa6qaN7+LxqfIAT3GiZa7Wv5DTBUzl5H4LY0Kc=
-go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.20.0 h1:sO4WKdPAudZGKPcpZT4MJn6JaDmpyLrMPDGGyA1SttE=
-go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.20.0/go.mod h1:oVGt1LRbBOBq1A5BQLlUg9UaU/54aiHw8cgjV3aWZ/E=
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.20.0 h1:Q3C9yzW6I9jqEc8sawxzxZmY48fs9u220KXq6d5s3XU=
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.20.0/go.mod h1:2AboqHi0CiIZU0qwhtUfCYD1GeUzvvIXWNkhDt7ZMG4=
-go.opentelemetry.io/otel v0.20.0 h1:eaP0Fqu7SXHwvjiqDq83zImeehOHX8doTvU9AwXON8g=
-go.opentelemetry.io/otel v0.20.0/go.mod h1:Y3ugLH2oa81t5QO+Lty+zXf8zC9L26ax4Nzoxm/dooo=
-go.opentelemetry.io/otel/exporters/otlp v0.20.0 h1:PTNgq9MRmQqqJY0REVbZFvwkYOA85vbdQU/nVfxDyqg=
-go.opentelemetry.io/otel/exporters/otlp v0.20.0/go.mod h1:YIieizyaN77rtLJra0buKiNBOm9XQfkPEKBeuhoMwAM=
-go.opentelemetry.io/otel/metric v0.20.0 h1:4kzhXFP+btKm4jwxpjIqjs41A7MakRFUS86bqLHTIw8=
-go.opentelemetry.io/otel/metric v0.20.0/go.mod h1:598I5tYlH1vzBjn+BTuhzTCSb/9debfNp6R3s7Pr1eU=
-go.opentelemetry.io/otel/oteltest v0.20.0 h1:HiITxCawalo5vQzdHfKeZurV8x7ljcqAgiWzF6Vaeaw=
-go.opentelemetry.io/otel/oteltest v0.20.0/go.mod h1:L7bgKf9ZB7qCwT9Up7i9/pn0PWIa9FqQ2IQ8LoxiGnw=
-go.opentelemetry.io/otel/sdk v0.20.0 h1:JsxtGXd06J8jrnya7fdI/U/MR6yXA5DtbZy+qoHQlr8=
-go.opentelemetry.io/otel/sdk v0.20.0/go.mod h1:g/IcepuwNsoiX5Byy2nNV0ySUF1em498m7hBWC279Yc=
-go.opentelemetry.io/otel/sdk/export/metric v0.20.0 h1:c5VRjxCXdQlx1HjzwGdQHzZaVI82b5EbBgOu2ljD92g=
-go.opentelemetry.io/otel/sdk/export/metric v0.20.0/go.mod h1:h7RBNMsDJ5pmI1zExLi+bJK+Dr8NQCh0qGhm1KDnNlE=
-go.opentelemetry.io/otel/sdk/metric v0.20.0 h1:7ao1wpzHRVKf0OQ7GIxiQJA6X7DLX9o14gmVon7mMK8=
-go.opentelemetry.io/otel/sdk/metric v0.20.0/go.mod h1:knxiS8Xd4E/N+ZqKmUPf3gTTZ4/0TjTXukfxjzSTpHE=
-go.opentelemetry.io/otel/trace v0.20.0 h1:1DL6EXUdcg95gukhuRRvLDO/4X5THh/5dIV52lqtnbw=
-go.opentelemetry.io/otel/trace v0.20.0/go.mod h1:6GjCW8zgDjwGHGa6GkyeB8+/5vjT16gUEi0Nf1iBdgw=
-go.opentelemetry.io/proto/otlp v0.7.0 h1:rwOQPCuKAKmwGKq2aVNnYIibI6wnV7EvzgfTCzcdGg8=
-go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
-go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw=
-go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
-go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A=
-go.uber.org/goleak v1.2.0 h1:xqgm/S+aQvhWFTtR0XK3Jvg7z8kGV8P4X14IzwN3Eqk=
-go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4=
-go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
-go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo=
-go.uber.org/zap v1.19.0 h1:mZQZefskPPCMIBCSEH0v2/iUqqLrYtaeqwD6FUGUnFE=
-go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI=
-golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
-golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
-golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+go.etcd.io/bbolt v1.3.11 h1:yGEzV1wPz2yVCLsD8ZAiGHhHVlczyC9d1rP43/VCRJ0=
+go.etcd.io/bbolt v1.3.11/go.mod h1:dksAq7YMXoljX0xu6VF5DMZGbhYYoLUalEiSySYAS4I=
+go.etcd.io/etcd/api/v3 v3.5.17 h1:cQB8eb8bxwuxOilBpMJAEo8fAONyrdXTHUNcMd8yT1w=
+go.etcd.io/etcd/api/v3 v3.5.17/go.mod h1:d1hvkRuXkts6PmaYk2Vrgqbv7H4ADfAKhyJqHNLJCB4=
+go.etcd.io/etcd/client/pkg/v3 v3.5.17 h1:XxnDXAWq2pnxqx76ljWwiQ9jylbpC4rvkAeRVOUKKVw=
+go.etcd.io/etcd/client/pkg/v3 v3.5.17/go.mod h1:4DqK1TKacp/86nJk4FLQqo6Mn2vvQFBmruW3pP14H/w=
+go.etcd.io/etcd/client/v2 v2.305.16 h1:kQrn9o5czVNaukf2A2At43cE9ZtWauOtf9vRZuiKXow=
+go.etcd.io/etcd/client/v2 v2.305.16/go.mod h1:h9YxWCzcdvZENbfzBTFCnoNumr2ax3F19sKMqHFmXHE=
+go.etcd.io/etcd/client/v3 v3.5.17 h1:o48sINNeWz5+pjy/Z0+HKpj/xSnBkuVhVvXkjEXbqZY=
+go.etcd.io/etcd/client/v3 v3.5.17/go.mod h1:j2d4eXTHWkT2ClBgnnEPm/Wuu7jsqku41v9DZ3OtjQo=
+go.etcd.io/etcd/pkg/v3 v3.5.16 h1:cnavs5WSPWeK4TYwPYfmcr3Joz9BH+TZ6qoUtz6/+mc=
+go.etcd.io/etcd/pkg/v3 v3.5.16/go.mod h1:+lutCZHG5MBBFI/U4eYT5yL7sJfnexsoM20Y0t2uNuY=
+go.etcd.io/etcd/raft/v3 v3.5.16 h1:zBXA3ZUpYs1AwiLGPafYAKKl/CORn/uaxYDwlNwndAk=
+go.etcd.io/etcd/raft/v3 v3.5.16/go.mod h1:P4UP14AxofMJ/54boWilabqqWoW9eLodl6I5GdGzazI=
+go.etcd.io/etcd/server/v3 v3.5.16 h1:d0/SAdJ3vVsZvF8IFVb1k8zqMZ+heGcNfft71ul9GWE=
+go.etcd.io/etcd/server/v3 v3.5.16/go.mod h1:ynhyZZpdDp1Gq49jkUg5mfkDWZwXnn3eIqCqtJnrD/s=
+go.mongodb.org/mongo-driver v1.17.1 h1:Wic5cJIwJgSpBhe3lx3+/RybR5PiYRMpVFgO7cOHyIM=
+go.mongodb.org/mongo-driver v1.17.1/go.mod h1:wwWm/+BuOddhcq3n68LKRmgk2wXzmF6s0SFOa0GINL4=
+go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
+go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.58.0
h1:PS8wXpbyaDJQ2VDHHncMe9Vct0Zn1fEjpsjrLxGJoSc= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.58.0/go.mod h1:HDBUsEjOuRC0EzKZ1bSaRGZWUBAzo+MhAcUUORSr4D0= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 h1:yd02MEjBdJkG3uabWP9apV+OuWRIXGDuJEUJbOHmCFU= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0/go.mod h1:umTcuxiv1n/s/S6/c2AT/g2CQ7u5C59sHDNmfSwgz7Q= +go.opentelemetry.io/otel v1.33.0 h1:/FerN9bax5LoK51X/sI0SVYrjSE0/yUL7DpxW4K3FWw= +go.opentelemetry.io/otel v1.33.0/go.mod h1:SUUkR6csvUQl+yjReHu5uM3EtVV7MBm5FHKRlNx4I8I= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0 h1:Vh5HayB/0HHfOQA7Ctx69E/Y/DcQSMPpKANYVMQ7fBA= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0/go.mod h1:cpgtDBaqD/6ok/UG0jT15/uKjAY8mRA53diogHBg3UI= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.33.0 h1:5pojmb1U1AogINhN3SurB+zm/nIcusopeBNp42f45QM= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.33.0/go.mod h1:57gTHJSE5S1tqg+EKsLPlTWhpHMsWlVmer+LA926XiA= +go.opentelemetry.io/otel/metric v1.33.0 h1:r+JOocAyeRVXD8lZpjdQjzMadVZp2M4WmQ+5WtEnklQ= +go.opentelemetry.io/otel/metric v1.33.0/go.mod h1:L9+Fyctbp6HFTddIxClbQkjtubW6O9QS3Ann/M82u6M= +go.opentelemetry.io/otel/sdk v1.33.0 h1:iax7M131HuAm9QkZotNHEfstof92xM+N8sr3uHXc2IM= +go.opentelemetry.io/otel/sdk v1.33.0/go.mod h1:A1Q5oi7/9XaMlIWzPSxLRWOI8nG3FnzHJNbiENQuihM= +go.opentelemetry.io/otel/sdk/metric v1.31.0 h1:i9hxxLJF/9kkvfHppyLL55aW7iIJz4JjxTeYusH7zMc= +go.opentelemetry.io/otel/sdk/metric v1.31.0/go.mod h1:CRInTMVvNhUKgSAMbKyTMxqOBC0zgyxzW55lZzX43Y8= +go.opentelemetry.io/otel/trace v1.33.0 h1:cCJuF7LRjUFso9LPnEAHJDB2pqzp+hbO8eu1qqW2d/s= +go.opentelemetry.io/otel/trace v1.33.0/go.mod h1:uIcdVUZMpTAmz0tI1z04GoVSezK37CbGV4fr1f2nBck= +go.opentelemetry.io/proto/otlp v1.4.0 h1:TA9WRvW6zMwP+Ssb6fLoUIuirti1gGbP28GcKG1jgeg= +go.opentelemetry.io/proto/otlp v1.4.0/go.mod h1:PPBWZIP98o2ElSqI35IHfu7hIhSwvc5N38Jw8pXuGFY= +go.uber.org/dig v1.18.0 h1:imUL1UiY0Mg4bqbFfsRQO5G4CGRBec/ZujWTvSVp3pw= +go.uber.org/dig v1.18.0/go.mod h1:Us0rSJiThwCv2GteUN0Q7OKvU7n5J4dxZ9JKUXozFdE= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= +go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= +go4.org/netipx v0.0.0-20231129151722-fdeea329fbba h1:0b9z3AuHCjxk0x/opv64kcgZLBseWJUpBw5I82+2U4M= +go4.org/netipx v0.0.0-20231129151722-fdeea329fbba/go.mod h1:PLyyIXexvUFg3Owu6p/WfdlivPbZJsZdgWZlrGope/Y= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190422183909-d864b10871cd/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod 
h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200414173820-0848c9571904/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc= -golang.org/x/crypto v0.18.0 h1:PGVlW0xEltQnzFZ55hkuX5+KLyrMYhHld1YHO4AKcdc= -golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg= -golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= -golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= -golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= -golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= -golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/exp v0.0.0-20240119083558-1b970713d09a h1:Q8/wZp0KX97QFTc2ywcOE0YRjZPVIx+MXInMzdvQqcA= -golang.org/x/exp v0.0.0-20240119083558-1b970713d09a/go.mod h1:idGWGoKP1toJGkd5/ig9ZLuPcZBC3ewk7SzmH0uou08= -golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= -golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= -golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= -golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/crypto v0.32.0 h1:euUpcYgM8WcP71gNpTqQCn6rC2t6ULUPiOzfWaXVVfc= +golang.org/x/crypto v0.32.0/go.mod h1:ZnnJkOaASj8g0AjIduWNlq2NRxL0PlBrbKVyZ6V/Ugc= +golang.org/x/exp v0.0.0-20241215155358-4a5509556b9e h1:4qufH0hlUYs6AO6XmZC3GqfDPGSXHVXUFR6OND+iJX4= +golang.org/x/exp v0.0.0-20241215155358-4a5509556b9e/go.mod h1:qj5a5QZpwLU2NLQudwIN5koi3beDhSAlJwa67PuM98c= golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod 
h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= -golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= -golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= -golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.14.0 h1:dGoOF9QVLYng8IHTm7BAyWqCqSheQ5pYWGhzW00YJr0= -golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= -golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/mod v0.22.0 h1:D4nJWe9zXqHOmWqj4VMOJhvzj7bEZg4wEYa759z1pH4= +golang.org/x/mod v0.22.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190327091125-710a502c58a2/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= -golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net 
v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= -golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk= -golang.org/x/net v0.20.0 h1:aCL9BSgETF1k+blQaYUBx9hJ9LOGP3gAVemcZlf1Kpo= -golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= -golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 
v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 h1:RerP+noqYHUQ8CMRcPlC2nvTa4dcBIjegkuWdcUDuqg= -golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/net v0.34.0 h1:Mb7Mrk043xzHgnRM88suvJFwzVrRfHEHJEl5/71CKw0= +golang.org/x/net v0.34.0/go.mod h1:di0qlW3YNM5oh6GqDGQr92MyTozJPmybPK4Ev/Gm31k= +golang.org/x/oauth2 v0.25.0 h1:CY4y7XT9v0cRI9oupztF8AgiIu99L/ksR/Xp/6jrZ70= +golang.org/x/oauth2 v0.25.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= -golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod 
h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ= +golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
-golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200831180312-196b9ba8737a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.16.0 h1:xWw16ngr6ZMtmxDyKyIgsE93KNKz5HKmMa3b8ALHidU= -golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.10.0/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU= +golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= -golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= -golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU= -golang.org/x/term v0.16.0 h1:m+B6fahuftsE9qjo0VWp2FW0mB3MTJvR0BaMQrq0pmE= -golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY= -golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/term v0.28.0 h1:/Ts8HFuMR2E6IP/jlo7QVLZHggjKQbhu/7H0LJFr3Gg= +golang.org/x/term v0.28.0/go.mod h1:Sw/lC2IAUZ92udQNf3WodGtn4k/XoLyZoh8v/8uiwek= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= -golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= -golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= -golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20211116232009-f0f3c7e86c11/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 h1:vVKdlvoWBphwdxWKrFZEuM0kGgGLxUOYcY4U/2Vjg44= -golang.org/x/time v0.0.0-20220210224613-90d013bbcef8/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= +golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= +golang.org/x/time v0.8.0 h1:9i3RxcPv3PZnitoVGMPDKZSq1xW1gK1Xy3ArNOGZfEg= +golang.org/x/time v0.8.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181221001348-537d06c36207/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod 
h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= -golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190327201419-c70d86f8b7cf/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod 
h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= -golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= -golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= -golang.org/x/tools v0.17.0 h1:FvmRgNOcs3kOa+T20R1uhfP9F6HgG2mfxDv1vrx1Htc= -golang.org/x/tools v0.17.0/go.mod h1:xsh6VxdV005rRVaS6SSAf9oiAqljS7UZUacMZ8Bnsps= +golang.org/x/tools v0.29.0 h1:Xx0h3TtM9rzQpQuR4dKLrdglAmCEN5Oi+P74JdhdzXE= +golang.org/x/tools v0.29.0/go.mod h1:KMQVMRsVxU6nHCFXrBPhDB8XncLNLM0lIy/F14RP588= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gomodules.xyz/password-generator v0.2.4/go.mod h1:TvwYYTx9+P1pPwKQKfZgB/wr2Id9MqAQ3B5auY7reNg= gomodules.xyz/version v0.1.0/go.mod 
h1:Y8xuV02mL/45psyPKG3NCVOwvAOy6T5Kx0l3rCjKSjU= -google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= -google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= -google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= -google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= -google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= -google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= -google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= -google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= -google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= -google.golang.org/api v0.44.0/go.mod h1:EBOGZqzyhtvMDoxwS97ctnh0zUmYY6CxqXsc1AvkYD8= -google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= -google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= -google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod 
h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= -google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= -google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= -google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= -google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod 
h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= -google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= -google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2 h1:NHN4wOCScVzKhPenJ2dt+BTs3X/XkBVI/Rh4iDt55T8= -google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= -google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= -google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= -google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= -google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= -google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= -google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= -google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.40.0 h1:AGJ0Ih4mHjSeibYkFGh1dD9KJ/eOtZ93I6hoHhukQ5Q= -google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= -google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= -google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= -google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= -google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= -google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= -google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf 
v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= -google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= -google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.27.1 h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ= -google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +google.golang.org/genproto v0.0.0-20240213162025-012b6fc9bca9 h1:9+tzLLstTlPTRyJTh+ah5wIMsBW5c4tQwGTN3thOW9Y= +google.golang.org/genproto v0.0.0-20240213162025-012b6fc9bca9/go.mod h1:mqHbVIp48Muh7Ywss/AD6I5kNVKZMmAa/QEW58Gxp2s= +google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576 h1:CkkIfIt50+lT6NHAVoRYEyAvQGFM7xEwXUUywFvEb3Q= +google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576/go.mod h1:1R3kvZ1dtP3+4p4d3G8uJ8rFk/fWlScl38vanWACI08= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241209162323-e6fa225c2576 h1:8ZmaLZE4XWrtU3MyClkYqqtl6Oegr3235h7jxsDyqCY= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241209162323-e6fa225c2576/go.mod h1:5uTbfoYQed2U9p3KIj2/Zzm02PYhndfdmML0qC3q3FU= +google.golang.org/grpc v1.69.0 h1:quSiOM1GJPmPH5XtU+BCoVXcDVJJAzNcoyfC2cCjGkI= +google.golang.org/grpc v1.69.0/go.mod h1:vyjdE6jLBI76dgpDojsFGNaHlxdjXN9ghpnd2o7JGZ4= +google.golang.org/protobuf v1.35.2 h1:8Ar7bF+apOIoThw1EdZl0p1oWvMqTHmpA2fRTyZO8io= +google.golang.org/protobuf v1.35.2/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU= -gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= -gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= -gopkg.in/go-playground/assert.v1 v1.2.1/go.mod h1:9RXL0bg/zibRAgZUYszZSwO/z8Y/a8bDuhia5mkpMnE= -gopkg.in/go-playground/validator.v8 v8.18.2/go.mod h1:RX2a/7Ha8BgOhfk7j780h4/u/RRjR0eouCJSH80/M2Y= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4= +gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= -gopkg.in/ini.v1 v1.62.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/ini.v1 v1.66.6 h1:LATuAqN/shcYAOkv3wl2L4rkaKqkcgTBQjOyYDvcPKI= -gopkg.in/ini.v1 v1.66.6/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce/go.mod 
h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA= -gopkg.in/natefinch/lumberjack.v2 v2.0.0 h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8= -gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= -gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= +gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc= +gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= -gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= -gotest.tools/v3 v3.0.3 h1:4AuOwCGf4lLR9u3YOe2awrHygurzhO/HeQ6laiA6Sx0= -honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= -honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -k8s.io/api v0.23.17 h1:gC11V5AIsNXUUa/xd5RQo7djukvl5O1ZDQKwEYu0H7g= -k8s.io/api v0.23.17/go.mod h1:upM9VIzXUjEyLTmGGi0KnH8kdlPnvgv+fEJ3tggDHfE= -k8s.io/apimachinery v0.23.17 h1:ipJ0SrpI6EzH8zVw0WhCBldgJhzIamiYIumSGTdFExY= -k8s.io/apimachinery v0.23.17/go.mod h1:87v5Wl9qpHbnapX1PSNgln4oO3dlyjAU3NSIwNhT4Lo= -k8s.io/apiserver v0.23.17 h1:0br6oJhknp1mT0epMS84ibj+XcpmthPd60B5bPdbko8= -k8s.io/apiserver v0.23.17/go.mod h1:Z5Wx5AY9iCZDblpI37Rzs099Rwi192FoS4iWDVODU9M= -k8s.io/client-go v0.23.17 h1:MbW05RO5sy+TFw2ds36SDdNSkJbr8DFVaaVrClSA8Vs= -k8s.io/client-go v0.23.17/go.mod h1:X5yz7nbJHS7q8977AKn8BWKgxeAXjl1sFsgstczUsCM= -k8s.io/cloud-provider v0.23.17 h1:Kw0MqtoKSkTNXAOxPUN8iQWOxx5UcEGZhVOWDkIkQ+A= -k8s.io/cloud-provider 
v0.23.17/go.mod h1:dZL4KeG2HT3jY5d9ntlg5jBO1UGgqHGdp0FMQ3oNfJA= -k8s.io/component-base v0.23.17 h1:yWK39HTP+rUPjr8HGvNzLECZWibcZcYsGiiQhrNH6zM= -k8s.io/component-base v0.23.17/go.mod h1:m/Em46sTbBgGa4O1K8jRXCWlJEkzBwKt18ipv3ckSCc= -k8s.io/component-helpers v0.23.17 h1:zDqL5GfaJVy7cPVS1gCAA0NwEvPheQYPAxwAMPvcw/U= -k8s.io/component-helpers v0.23.17/go.mod h1:CDkNKvJCUqICwwd8tc7QtdCCokaXSOGlLkemMZM4AJ0= -k8s.io/controller-manager v0.23.17 h1:8JgND68i4AP9ROBx36h8VdI4XLUXmbhYsrQKSveeZys= -k8s.io/controller-manager v0.23.17/go.mod h1:CmcKLAkziOvgVbR8i6bf65XRpcVJVZ+bSXdvJ6X7jRk= -k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= -k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= -k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= -k8s.io/klog/v2 v2.110.1 h1:U/Af64HJf7FcwMcXyKm2RPM22WZzyR7OSpYj5tg3cL0= -k8s.io/klog/v2 v2.110.1/go.mod h1:YGtd1984u+GgbuZ7e08/yBuAfKLSO0+uR1Fhi6ExXjo= -k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65 h1:E3J9oCLlaobFUqsjG9DfKbP2BmgwBL2p7pn0A3dG9W4= -k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65/go.mod h1:sX9MT8g7NVZM5lVL/j8QyCCJe8YSMW30QvGZWaCIDIk= -k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -k8s.io/utils v0.0.0-20211116205334-6203023598ed h1:ck1fRPWPJWsMd8ZRFsWc6mh/zHp5fZ/shhbrgPUxDAE= -k8s.io/utils v0.0.0-20211116205334-6203023598ed/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= -rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= -rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.35 h1:+xBL5uTc+BkPBwmMi3vYfUJjq+N3K+H6PXeETwf5cPI= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.35/go.mod h1:WxjusMwXlKzfAs4p9km6XJRndVt2FROgMVCE4cdohFo= -sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6 h1:fD1pz4yfdADVNfFmcP2aBEtudwUQ1AlLnRBALr33v3s= -sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6/go.mod h1:p4QtZmO4uMYipTQNzagwnNoseA6OxSUutVw05NhYDRs= -sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= -sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE= -sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E= +k8s.io/api v0.32.1 h1:f562zw9cy+GvXzXf0CKlVQ7yHJVYzLfL6JAS4kOAaOc= +k8s.io/api v0.32.1/go.mod h1:/Yi/BqkuueW1BgpoePYBRdDYfjPF5sgTr5+YqDZra5k= +k8s.io/apiextensions-apiserver v0.30.2 h1:l7Eue2t6QiLHErfn2vwK4KgF4NeDgjQkCXtEbOocKIE= +k8s.io/apiextensions-apiserver v0.30.2/go.mod h1:lsJFLYyK40iguuinsb3nt+Sj6CmodSI4ACDLep1rgjw= +k8s.io/apimachinery v0.32.1 h1:683ENpaCBjma4CYqsmZyhEzrGz6cjn1MY/X2jB2hkZs= +k8s.io/apimachinery v0.32.1/go.mod h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE= +k8s.io/apiserver v0.32.1 h1:oo0OozRos66WFq87Zc5tclUX2r0mymoVHRq8JmR7Aak= +k8s.io/apiserver v0.32.1/go.mod h1:UcB9tWjBY7aryeI5zAgzVJB/6k7E97bkr1RgqDz0jPw= +k8s.io/client-go v0.32.1 h1:otM0AxdhdBIaQh7l1Q0jQpmo7WOFIk5FFa4bg6YMdUU= +k8s.io/client-go v0.32.1/go.mod h1:aTTKZY7MdxUaJ/KiUs8D+GssR9zJZi77ZqtzcGXIiDg= +k8s.io/cloud-provider v0.32.1 h1:74rRhnfca3o4CsjjnIp/C3ARVuSmyNsxgWPtH0yc9Z0= +k8s.io/cloud-provider v0.32.1/go.mod h1:GECSanFT+EeZ/ToX3xlasjETzMUI+VFu92zHUDUsGHw= +k8s.io/component-base v0.32.1 h1:/5IfJ0dHIKBWysGV0yKTFfacZ5yNV1sulPh3ilJjRZk= +k8s.io/component-base 
+k8s.io/component-helpers v0.32.1 h1:TwdsSM1vW9GjnfX18lkrZbwE5G9psCIS2/rhenTDXd8=
+k8s.io/component-helpers v0.32.1/go.mod h1:1JT1Ei3FD29yFQ18F3laj1WyvxYdHIhyxx6adKMFQXI=
+k8s.io/controller-manager v0.32.1 h1:z3oQp1O5l0cSzM/MKf8V4olhJ9TmnELoJRPcV/v1s+Y=
+k8s.io/controller-manager v0.32.1/go.mod h1:dVA1UZPbqHH4hEhrrnLvQ4d5qVQCklNB8GEzYV59v/4=
+k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
+k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
+k8s.io/kms v0.32.1 h1:TW6cswRI/fawoQRFGWLmEceO37rZXupdoRdmO019jCc=
+k8s.io/kms v0.32.1/go.mod h1:Bk2evz/Yvk0oVrvm4MvZbgq8BD34Ksxs2SRHn4/UiOM=
+k8s.io/kube-openapi v0.0.0-20241212222426-2c72e554b1e7 h1:hcha5B1kVACrLujCKLbr8XWMxCxzQx42DY8QKYJrDLg=
+k8s.io/kube-openapi v0.0.0-20241212222426-2c72e554b1e7/go.mod h1:GewRfANuJ70iYzvn+i4lezLDAFzvjxZYK1gn1lWcfas=
+k8s.io/utils v0.0.0-20241210054802-24370beab758 h1:sdbE21q2nlQtFh65saZY+rRM6x6aJJI8IUa1AmH/qa0=
+k8s.io/utils v0.0.0-20241210054802-24370beab758/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
+sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.1 h1:uOuSLOMBWkJH0TWa9X6l+mj5nZdm6Ay6Bli8HL8rNfk=
+sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.1/go.mod h1:Ve9uj1L+deCXFrPOk1LpFXqTg7LCFzFso6PA48q/XZw=
+sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 h1:gBQPwqORJ8d8/YNZWEjoZs7npUVDpVXUUOFfW6CgAqE=
+sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg=
+sigs.k8s.io/structured-merge-diff/v4 v4.5.0 h1:nbCitCK2hfnhyiKo6uf2HxUPTCodY6Qaf85SbDIaMBk=
+sigs.k8s.io/structured-merge-diff/v4 v4.5.0/go.mod h1:N8f93tFZh9U6vpxwRArLiikrE5/2tiu1w1AGfACIGE4=
 sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
-sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q=
-sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
+sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=
+sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY=
diff --git a/hack/templates/prometheus.go.gotpl b/hack/templates/prometheus.go.gotpl
new file mode 100644
index 00000000..4c582d08
--- /dev/null
+++ b/hack/templates/prometheus.go.gotpl
@@ -0,0 +1,43 @@
+import (
+	"github.com/prometheus/client_golang/prometheus"
+	"github.com/prometheus/client_golang/prometheus/promauto"
+)
+
+{{ $decorator := (or .Vars.DecoratorName (printf "%sWithPrometheus" .Interface.Name)) }}
+{{ $metric_name := (or .Vars.MetricName (printf "ccm_linode_%s_requests_total" (down .Interface.Name))) }}
+
+// {{$decorator}} implements {{.Interface.Type}} interface with all methods wrapped
+// with Prometheus counters
+type {{$decorator}} struct {
+	base {{.Interface.Type}}
+}
+
+var {{upFirst .Interface.Name}}MethodCounterVec = promauto.NewCounterVec(
+	prometheus.CounterOpts{
+		Name: "{{$metric_name}}",
+		Help: "{{ down .Interface.Name }} counters for each operation and its result",
+	},
+	[]string{"method", "result"})
+
+// New{{.Interface.Name}}WithPrometheus returns an instance of the {{.Interface.Type}} decorated with prometheus metrics
+func New{{$decorator}}(base {{.Interface.Type}}) {{$decorator}} {
+	return {{$decorator}} {
+		base: base,
+	}
+}
+
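+{{/* Note on the wrappers below: the `if err != nil` check assumes gowrap
+     renders {{$method.Declaration}} with the error result named `err`, the
+     same convention the upstream gowrap prometheus template relies on; the
+     deferred closure then records each call as "ok" or "error". */}}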
+{{range $method := .Interface.Methods}}
+	// {{$method.Name}} implements {{$.Interface.Type}}
+	func (_d {{$decorator}}) {{$method.Declaration}} {
+		defer func() {
+			result := "ok"
+			{{- if $method.ReturnsError}}
+			if err != nil {
+				result = "error"
+			}
+			{{end}}
+			{{upFirst $.Interface.Name}}MethodCounterVec.WithLabelValues("{{$method.Name}}", result).Inc()
+		}()
+		{{$method.Pass "_d.base."}}
+	}
+{{end}}
diff --git a/main.go b/main.go
index f82c9e18..6577277c 100644
--- a/main.go
+++ b/main.go
@@ -4,6 +4,7 @@ import (
 	"context"
 	"flag"
 	"fmt"
+	"net"
 	"os"
 
 	"k8s.io/component-base/logs"
@@ -11,10 +12,10 @@ import (
 	"github.com/linode/linode-cloud-controller-manager/cloud/linode"
 	"github.com/linode/linode-cloud-controller-manager/sentry"
 	"github.com/spf13/pflag"
-	"k8s.io/apimachinery/pkg/util/wait"
 	cloudprovider "k8s.io/cloud-provider"
 	"k8s.io/cloud-provider/app"
 	"k8s.io/cloud-provider/app/config"
+	"k8s.io/cloud-provider/names"
 	"k8s.io/cloud-provider/options"
 	utilflag "k8s.io/component-base/cli/flag"
 	"k8s.io/klog/v2"
@@ -24,9 +25,10 @@ import (
 )
 
 const (
-	sentryDSNVariable         = "SENTRY_DSN"
-	sentryEnvironmentVariable = "SENTRY_ENVIRONMENT"
-	sentryReleaseVariable     = "SENTRY_RELEASE"
+	sentryDSNVariable            = "SENTRY_DSN"
+	sentryEnvironmentVariable    = "SENTRY_ENVIRONMENT"
+	sentryReleaseVariable        = "SENTRY_RELEASE"
+	linodeExternalSubnetVariable = "LINODE_EXTERNAL_SUBNET"
 )
 
 func initializeSentry() {
@@ -72,10 +74,20 @@ func main() {
 		klog.Fatalf("unable to initialize command options: %v", err)
 	}
 	fss := utilflag.NamedFlagSets{}
-	command := app.NewCloudControllerManagerCommand(ccmOptions, cloudInitializer, app.DefaultInitFuncConstructors, fss, wait.NeverStop)
+	controllerAliases := names.CCMControllerAliases()
+	stopCh := make(chan struct{})
+	command := app.NewCloudControllerManagerCommand(ccmOptions, cloudInitializer, app.DefaultInitFuncConstructors, controllerAliases, fss, stopCh)
 
 	// Add Linode-specific flags
 	command.Flags().BoolVar(&linode.Options.LinodeGoDebug, "linodego-debug", false, "enables debug output for the LinodeAPI wrapper")
+	command.Flags().BoolVar(&linode.Options.EnableRouteController, "enable-route-controller", false, "enables route_controller for ccm")
+	command.Flags().BoolVar(&linode.Options.EnableTokenHealthChecker, "enable-token-health-checker", false, "enables Linode API token health checker")
+	command.Flags().StringVar(&linode.Options.VPCName, "vpc-name", "", "[deprecated: use vpc-names instead] vpc name whose routes will be managed by route-controller")
+	command.Flags().StringVar(&linode.Options.VPCNames, "vpc-names", "", "comma separated vpc names whose routes will be managed by route-controller")
+	command.Flags().StringVar(&linode.Options.LoadBalancerType, "load-balancer-type", "nodebalancer", "configures which type of load-balancing to use for LoadBalancer Services (options: nodebalancer, cilium-bgp)")
+	command.Flags().StringVar(&linode.Options.BGPNodeSelector, "bgp-node-selector", "", "node selector to use to perform shared IP fail-over with BGP (e.g. cilium-bgp-peering=true)")
+	command.Flags().StringVar(&linode.Options.IpHolderSuffix, "ip-holder-suffix", "", "suffix to append to the ip holder name when using shared IP fail-over with BGP (e.g. ip-holder-suffix=my-cluster-name)")
+	command.Flags().StringSliceVar(&linode.Options.NodeBalancerTags, "nodebalancer-tags", []string{}, "Linode tags to apply to all NodeBalancers")
 
 	// Set static flags
 	command.Flags().VisitAll(func(fl *pflag.Flag) {
@@ -103,11 +115,25 @@ func main() {
 	linode.Options.KubeconfigFlag = command.Flags().Lookup("kubeconfig")
 	if linode.Options.KubeconfigFlag == nil {
 		msg := "kubeconfig missing from CCM flag set"
-		sentry.CaptureError(ctx, fmt.Errorf(msg))
+		sentry.CaptureError(ctx, fmt.Errorf("%s", msg))
 		fmt.Fprintf(os.Stderr, "kubeconfig missing from CCM flag set"+"\n")
 		os.Exit(1)
 	}
 
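+	// LINODE_EXTERNAL_SUBNET is optional; when set, it must contain a valid
+	// CIDR (e.g. "192.168.0.0/17" as an illustrative value). The parsed
+	// network is handed to the provider via Options.LinodeExternalNetwork;
+	// an unparseable value is reported to Sentry and aborts startup.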
+	if externalSubnet, ok := os.LookupEnv(linodeExternalSubnetVariable); ok && externalSubnet != "" {
+		_, network, err := net.ParseCIDR(externalSubnet)
+		if err != nil {
+			msg := fmt.Sprintf("Unable to parse %s as network subnet: %v", externalSubnet, err)
+			sentry.CaptureError(ctx, fmt.Errorf("%s", msg))
+			fmt.Fprintf(os.Stderr, "%v\n", msg)
+			os.Exit(1)
+		}
+		linode.Options.LinodeExternalNetwork = network
+	}
+
+	// Provide stop channel for linode authenticated client healthchecker
+	linode.Options.GlobalStopChannel = stopCh
+
 	pflag.CommandLine.SetNormalizeFunc(utilflag.WordSepNormalizeFunc)
 	pflag.CommandLine.AddGoFlagSet(flag.CommandLine)
diff --git a/sentry/sentry_test.go b/sentry/sentry_test.go
new file mode 100644
index 00000000..b26dc9d7
--- /dev/null
+++ b/sentry/sentry_test.go
@@ -0,0 +1,195 @@
+package sentry
+
+import (
+	"context"
+	"testing"
+
+	"github.com/getsentry/sentry-go"
+	"github.com/stretchr/testify/assert"
+)
+
+func TestInitialize(t *testing.T) {
+	// Reset the initialized flag before each test
+	initialized = false
+
+	tests := []struct {
+		name        string
+		dsn         string
+		environment string
+		release     string
+		wantErr     bool
+	}{
+		{
+			name:        "successful initialization",
+			dsn:         "https://test@sentry.io/123",
+			environment: "test",
+			release:     "1.0.0",
+			wantErr:     false,
+		},
+		{
+			name:        "empty DSN",
+			dsn:         "",
+			environment: "test",
+			release:     "1.0.0",
+			wantErr:     true,
+		},
+		{
+			name:        "double initialization",
+			dsn:         "https://test@sentry.io/123",
+			environment: "test",
+			release:     "1.0.0",
+			wantErr:     true,
+		},
+	}
+
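+	// Note: these cases run in order and share the package-level initialized
+	// flag, so once the first case succeeds, the later cases exercise the
+	// already-initialized error path rather than independent failures.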
Initialize("https://test@sentry.io/123", "test", "1.0.0") + } + + ctx := tt.setupFunc() + hub := getHubFromContext(ctx) + + if tt.wantNil { + assert.Nil(t, hub) + } else { + assert.NotNil(t, hub) + } + }) + } +} + +func TestSetTag(t *testing.T) { + // Reset the initialized flag + initialized = false + _ = Initialize("https://test@sentry.io/123", "test", "1.0.0") + + tests := []struct { + name string + setupFunc func() context.Context + key string + value string + }{ + { + name: "set tag with valid hub", + setupFunc: func() context.Context { + return SetHubOnContext(context.Background()) + }, + key: "test-key", + value: "test-value", + }, + { + name: "set tag with no hub", + setupFunc: func() context.Context { + return context.Background() + }, + key: "test-key", + value: "test-value", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ctx := tt.setupFunc() + // This should not panic + SetTag(ctx, tt.key, tt.value) + }) + } +} + +func TestCaptureError(t *testing.T) { + // Reset the initialized flag + initialized = false + _ = Initialize("https://test@sentry.io/123", "test", "1.0.0") + + tests := []struct { + name string + setupFunc func() context.Context + err error + }{ + { + name: "capture error with valid hub", + setupFunc: func() context.Context { + return SetHubOnContext(context.Background()) + }, + err: assert.AnError, + }, + { + name: "capture error with no hub", + setupFunc: func() context.Context { + return context.Background() + }, + err: assert.AnError, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ctx := tt.setupFunc() + // This should not panic + CaptureError(ctx, tt.err) + }) + } +}