diff --git a/.github/workflows/build-test.yml b/.github/workflows/build-test.yml new file mode 100644 index 00000000..c449929f --- /dev/null +++ b/.github/workflows/build-test.yml @@ -0,0 +1,113 @@ +name: Build Test +on: + push: + branches: + - main + pull_request: null + +permissions: + contents: read + pull-requests: read + actions: read + +concurrency: + group: build-test-${{ github.event.pull_request.number || github.ref_name }} + cancel-in-progress: true + +jobs: + changes: + runs-on: ubuntu-latest + outputs: + paths: ${{ steps.filter.outputs.changes }} + steps: + - uses: actions/checkout@v4.2.2 + with: + ref: ${{ github.event.pull_request.head.sha }} + - name: Harden Runner + uses: step-security/harden-runner@v2 + with: + disable-sudo: true + egress-policy: block + allowed-endpoints: > + api.github.com:443 + github.com:443 + - uses: dorny/paths-filter@v3 + id: filter + with: + base: ${{ github.ref }} + filters: .github/filters.yml + + build-test: + runs-on: ubuntu-latest + needs: changes + if: ${{ contains(fromJSON(needs.changes.outputs.paths), 'src') }} + steps: + - name: Harden Runner + uses: step-security/harden-runner@v2 + with: + disable-sudo: true + egress-policy: block + allowed-endpoints: > + api.github.com:443 + github.com:443 + golang.org:443 + proxy.golang.org:443 + sum.golang.org:443 + objects.githubusercontent.com:443 + storage.googleapis.com:443 + cli.codecov.io:443 + api.codecov.io:443 + ingest.codecov.io:443 + raw.githubusercontent.com:443 + get.helm.sh:443 + golangci-lint.run:443 + + - uses: actions/checkout@v4.2.2 + with: + fetch-depth: 0 + - uses: actions/setup-go@v5 + with: + go-version-file: go.mod + check-latest: true + + - name: Vet + run: make vet + + - name: lint + uses: golangci/golangci-lint-action@v6 + with: + version: v1.64.5 + + - name: Helm Lint + run: make helm-lint + + - name: Test + run: make test + + - name: Build + run: make build + + docker-build: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4.2.2 + with: + fetch-depth: 0 + - name: Docker Meta + id: meta + uses: docker/metadata-action@v5 + with: + images: | + linode/linode-cloud-controller-manager + tags: | + type=raw,value=pr-${{ github.event.pull_request.number }},enable=${{ github.event_name == 'pull_request' }} + type=raw,value=latest,enable=${{ github.event_name == 'push' && github.ref == 'refs/heads/main' }} + - name: Build Dockerfile + uses: docker/build-push-action@v6 + with: + context: . 
+ push: false + tags: ${{ steps.meta.outputs.tags }} + labels: ${{ steps.meta.outputs.labels }} + build-args: | + REV=${{ github.ref_name }} diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index c034a43a..11aa99c6 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -12,7 +12,7 @@ permissions: actions: read concurrency: - group: ci-${{ github.ref }} + group: ci-${{ github.event.pull_request.number || github.ref_name }} cancel-in-progress: true jobs: @@ -38,7 +38,7 @@ jobs: base: ${{ github.ref }} filters: .github/filters.yml - build-test: + codecov: runs-on: ubuntu-latest environment: ${{ github.event.pull_request.head.repo.fork == true && 'prod-external' || 'prod' }} needs: changes @@ -61,7 +61,6 @@ jobs: api.codecov.io:443 ingest.codecov.io:443 raw.githubusercontent.com:443 - get.helm.sh:443 - uses: actions/checkout@v4.2.2 with: @@ -71,17 +70,6 @@ jobs: go-version-file: go.mod check-latest: true - - name: Vet - run: make vet - - - name: lint - uses: golangci/golangci-lint-action@v6 - with: - version: v1.62.2 - - - name: Helm Lint - run: make helm-lint - - name: Test run: make test @@ -93,35 +81,6 @@ jobs: token: ${{ secrets.CODECOV_TOKEN }} slug: linode/linode-cloud-controller-manager - - name: Build - run: make build - - docker-build: - runs-on: ubuntu-latest - environment: ${{ github.event.pull_request.head.repo.fork == true && 'prod-external' || 'prod' }} - steps: - - uses: actions/checkout@v4.2.2 - with: - fetch-depth: 0 - - name: Docker Meta - id: meta - uses: docker/metadata-action@v5 - with: - images: | - linode/linode-cloud-controller-manager - tags: | - type=raw,value=latest,enable=${{ github.ref == format('refs/heads/{0}', 'main') }} - type=semver,pattern={{raw}},value=${{ github.ref_name }} - - name: Build Dockerfile - uses: docker/build-push-action@v6 - with: - context: . - push: false - tags: ${{ steps.meta.outputs.tags }} - labels: ${{ steps.meta.outputs.labels }} - build-args: | - REV=${{ github.ref_name }} - e2e-tests: runs-on: ubuntu-latest environment: ${{ github.event.pull_request.head.repo.fork == true && 'prod-external' || 'prod' }} @@ -130,7 +89,7 @@ jobs: env: GITHUB_TOKEN: ${{ secrets.github_token }} LINODE_TOKEN: ${{ secrets.LINODE_TOKEN }} - IMG: linode/linode-cloud-controller-manager:${{ github.ref == 'refs/heads/main' && 'latest' || format('pr-{0}', github.event.number) || github.ref_name }} + IMG: linode/linode-cloud-controller-manager:${{ github.event_name == 'push' && github.ref == 'refs/heads/main' && 'latest' || format('pr-{0}', github.event.pull_request.number) || github.ref_name }} LINODE_REGION: us-lax LINODE_CONTROL_PLANE_MACHINE_TYPE: g6-standard-2 LINODE_MACHINE_TYPE: g6-standard-2 @@ -164,6 +123,9 @@ jobs: - name: Run Cilium BGP e2e test run: devbox run e2e-test-bgp + - name: Run subnet filtering test + run: devbox run e2e-test-subnet + - name: Cleanup Resources if: always() run: devbox run cleanup-cluster diff --git a/.golangci.yml b/.golangci.yml index fcb5072d..c38b869a 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -1,5 +1,98 @@ run: timeout: 5m + issues-exit-code: 1 + +output: + formats: + - format: colored-line-number + +linters-settings: + cyclop: + max-complexity: 15 + + depguard: + rules: + main: + files: + - "$all" + - "!$test" + deny: + # TODO: Remove reflect from loadbalancers.go and reinstate this requirement + #- pkg: "reflect" + # desc: "Reflection is never clear." 
+ - pkg: "gob" + desc: "Please convert types manually" + + dupl: + threshold: 100 + + errcheck: + check-type-assertions: true + check-blank: true + exclude-functions: + - fmt:.* + - io/ioutil:^Read.* + + gci: + sections: + - standard + - default + - blank + - dot + - prefix(github.com/linode/linode-cloud-controller-manager) + + goconst: + min-len: 3 + min-occurrences: 5 + + gocritic: + enabled-tags: + - diagnostic + - experimental + - opinionated + - performance + - style + + settings: + captLocal: + paramsOnly: true + rangeValCopy: + sizeThreshold: 32 + + gofmt: + simplify: true + + goimports: + local-prefixes: github.com/linode/linode-cloud-controller-manager + + govet: + enable: + - shadow + + gosec: + confidence: "medium" + excludes: + - G115 + + mnd: + checks: + - case + - condition + - operation + - return + # Ignoring argument and assign due to the number of cases we're ignoring + + nolintlint: + require-explanation: true + require-specific: true + + prealloc: + simple: true + range-loops: true + for-loops: true + + varnamelen: + min-name-length: 2 linters: disable-all: true @@ -12,25 +105,102 @@ linters: - staticcheck - typecheck - unused - # cherry picked from https://golangci-lint.run/usage/linters/ - # - ginkgolinter # to be enabled once #158 is merged + - ginkgolinter + - asasalint + - asciicheck + - bidichk - bodyclose + - containedctx + - contextcheck - copyloopvar + #- cyclop + - decorder + - depguard + - dogsled + - dupl + - dupword + - durationcheck + - errchkjson + - errname + - errorlint + - exhaustive + - forbidigo + - forcetypeassert + - gci - gocheckcompilerdirectives - gofmt - goimports + #- gocognit + - goconst + #- gocritic + - gofumpt + - goprintffuncname + - gosec - importas - loggercheck + - maintidx - makezero + - misspell + - mnd + - musttag + #- nestif - nilerr + - nilnil + - noctx + - nolintlint + - nosprintfhostport + #- paralleltest # adding t.Parallel() to tests broke so many and made others flaky - prealloc + - predeclared - reassign - - tenv + - testifylint + - thelper - unconvert - - wastedassign - unparam - - gofumpt - - nosprintfhostport - - musttag - - exhaustive - - nilnil + - usestdlibvars + - usetesting + - varnamelen + - wastedassign + - whitespace + + presets: + - bugs + - unused + fast: false + +issues: + exclude-rules: + # Exclude some linters from running on tests files. + - path: _test(ing)?\.go + linters: + - gocyclo + - maintidx + - errcheck + - dupl + - gosec + - copyloopvar + - unparam + - varnamelen + + # Ease some gocritic warnings on test files. 
+ - path: _test\.go + text: "(unnamedResult|exitAfterDefer)" + linters: + - gocritic + + - text: "G101:" + linters: + - gosec + - gas + + - text: "G104:" + linters: + - gosec + - gas + + exclude-use-default: false + new: false + max-issues-per-linter: 0 + max-same-issues: 0 + exclude-files: + - "zz_generated\\..+\\.go$" diff --git a/Dockerfile b/Dockerfile index 24ed3827..8dc47101 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.23-alpine AS builder +FROM golang:1.24-alpine AS builder RUN mkdir -p /linode WORKDIR /linode @@ -11,7 +11,7 @@ COPY sentry ./sentry RUN go mod download RUN go build -a -ldflags '-extldflags "-static"' -o /bin/linode-cloud-controller-manager-linux /linode -FROM alpine:3.21.2 +FROM alpine:3.21.3 RUN apk add --update --no-cache ca-certificates LABEL maintainers="Linode" LABEL description="Linode Cloud Controller Manager" diff --git a/Makefile b/Makefile index 42a96e2a..6812df15 100644 --- a/Makefile +++ b/Makefile @@ -14,16 +14,21 @@ HELM_VERSION ?= v3.16.3 # Dev Setup ##################################################################### CLUSTER_NAME ?= ccm-$(shell git rev-parse --short HEAD) +SUBNET_CLUSTER_NAME ?= subnet-testing-$(shell git rev-parse --short HEAD) +VPC_NAME ?= $(CLUSTER_NAME) +MANIFEST_NAME ?= capl-cluster-manifests +SUBNET_MANIFEST_NAME ?= subnet-testing-manifests K8S_VERSION ?= "v1.31.2" CAPI_VERSION ?= "v1.8.5" CAAPH_VERSION ?= "v0.2.1" -CAPL_VERSION ?= "v0.7.1" +CAPL_VERSION ?= "v0.8.5" CONTROLPLANE_NODES ?= 1 WORKER_NODES ?= 1 LINODE_FIREWALL_ENABLED ?= true LINODE_REGION ?= us-lax LINODE_OS ?= linode/ubuntu22.04 KUBECONFIG_PATH ?= $(CURDIR)/test-cluster-kubeconfig.yaml +SUBNET_KUBECONFIG_PATH ?= $(CURDIR)/subnet-testing-kubeconfig.yaml MGMT_KUBECONFIG_PATH ?= $(CURDIR)/mgmt-cluster-kubeconfig.yaml # if the $DEVBOX_PACKAGES_DIR env variable exists that means we are within a devbox shell and can safely @@ -74,7 +79,7 @@ fmt: .PHONY: test # we say code is not worth testing unless it's formatted test: fmt codegen - go test -v -cover -coverprofile ./coverage.out ./cloud/... ./sentry/... $(TEST_ARGS) + go test -v -coverpkg=./sentry,./cloud/linode/client,./cloud/linode/firewall,./cloud/linode -coverprofile ./coverage.out -cover ./sentry/... ./cloud/... 
$(TEST_ARGS) .PHONY: build-linux build-linux: codegen @@ -144,14 +149,15 @@ capl-cluster: generate-capl-cluster-manifests create-capl-cluster patch-linode-c .PHONY: generate-capl-cluster-manifests generate-capl-cluster-manifests: # Create the CAPL cluster manifests without any CSI driver stuff - LINODE_FIREWALL_ENABLED=$(LINODE_FIREWALL_ENABLED) LINODE_OS=$(LINODE_OS) clusterctl generate cluster $(CLUSTER_NAME) \ + LINODE_FIREWALL_ENABLED=$(LINODE_FIREWALL_ENABLED) LINODE_OS=$(LINODE_OS) VPC_NAME=$(VPC_NAME) clusterctl generate cluster $(CLUSTER_NAME) \ --kubernetes-version $(K8S_VERSION) --infrastructure linode-linode:$(CAPL_VERSION) \ - --control-plane-machine-count $(CONTROLPLANE_NODES) --worker-machine-count $(WORKER_NODES) > capl-cluster-manifests.yaml + --control-plane-machine-count $(CONTROLPLANE_NODES) --worker-machine-count $(WORKER_NODES) > $(MANIFEST_NAME).yaml + yq -i e 'select(.kind == "LinodeVPC").spec.subnets = [{"ipv4": "10.0.0.0/8", "label": "default"}, {"ipv4": "172.16.0.0/16", "label": "testing"}]' $(MANIFEST_NAME).yaml .PHONY: create-capl-cluster create-capl-cluster: # Create a CAPL cluster with updated CCM and wait for it to be ready - kubectl apply -f capl-cluster-manifests.yaml + kubectl apply -f $(MANIFEST_NAME).yaml kubectl wait --for=condition=ControlPlaneReady cluster/$(CLUSTER_NAME) --timeout=600s || (kubectl get cluster -o yaml; kubectl get linodecluster -o yaml; kubectl get linodemachines -o yaml) kubectl wait --for=condition=NodeHealthy=true machines -l cluster.x-k8s.io/cluster-name=$(CLUSTER_NAME) --timeout=900s clusterctl get kubeconfig $(CLUSTER_NAME) > $(KUBECONFIG_PATH) @@ -162,6 +168,7 @@ create-capl-cluster: .PHONY: patch-linode-ccm patch-linode-ccm: KUBECONFIG=$(KUBECONFIG_PATH) kubectl patch -n kube-system daemonset ccm-linode --type='json' -p="[{'op': 'replace', 'path': '/spec/template/spec/containers/0/image', 'value': '${IMG}'}]" + KUBECONFIG=$(KUBECONFIG_PATH) kubectl patch -n kube-system daemonset ccm-linode --type='json' -p='[{"op": "add", "path": "/spec/template/spec/containers/0/env/-", "value": {"name": "LINODE_API_VERSION", "value": "v4beta"}}]' KUBECONFIG=$(KUBECONFIG_PATH) kubectl rollout status -n kube-system daemonset/ccm-linode --timeout=600s KUBECONFIG=$(KUBECONFIG_PATH) kubectl -n kube-system get daemonset/ccm-linode -o yaml @@ -193,7 +200,7 @@ e2e-test: KUBECONFIG=$(KUBECONFIG_PATH) \ REGION=$(LINODE_REGION) \ LINODE_TOKEN=$(LINODE_TOKEN) \ - chainsaw test e2e/test --parallel 2 + chainsaw test e2e/test --parallel 2 $(E2E_FLAGS) .PHONY: e2e-test-bgp e2e-test-bgp: @@ -204,7 +211,28 @@ e2e-test-bgp: KUBECONFIG=$(KUBECONFIG_PATH) \ REGION=$(LINODE_REGION) \ LINODE_TOKEN=$(LINODE_TOKEN) \ - chainsaw test e2e/bgp-test/lb-cilium-bgp + chainsaw test e2e/bgp-test/lb-cilium-bgp $(E2E_FLAGS) + +.PHONY: e2e-test-subnet +e2e-test-subnet: + # Generate cluster manifests for second cluster + SUBNET_NAME=testing CLUSTER_NAME=$(SUBNET_CLUSTER_NAME) MANIFEST_NAME=$(SUBNET_MANIFEST_NAME) VPC_NAME=$(CLUSTER_NAME) \ + VPC_NETWORK_CIDR=172.16.0.0/16 K8S_CLUSTER_CIDR=172.16.64.0/18 make generate-capl-cluster-manifests + # Add subnetNames to HelmChartProxy + yq e 'select(.kind == "HelmChartProxy" and .spec.chartName == "ccm-linode").spec.valuesTemplate' $(SUBNET_MANIFEST_NAME).yaml > tmp.yaml + yq -i e '.routeController += {"subnetNames": "testing"}' tmp.yaml + yq -i e '.routeController.vpcNames = "{{.InfraCluster.spec.vpcRef.name}}"' tmp.yaml + yq -i e 'select(.kind == "HelmChartProxy" and .spec.chartName == "ccm-linode").spec.valuesTemplate = 
load_str("tmp.yaml")' $(SUBNET_MANIFEST_NAME).yaml + rm tmp.yaml + # Create the second cluster + MANIFEST_NAME=$(SUBNET_MANIFEST_NAME) CLUSTER_NAME=$(SUBNET_CLUSTER_NAME) KUBECONFIG_PATH=$(SUBNET_KUBECONFIG_PATH) \ + make create-capl-cluster + KUBECONFIG_PATH=$(SUBNET_KUBECONFIG_PATH) make patch-linode-ccm + # Run chainsaw test + LINODE_TOKEN=$(LINODE_TOKEN) \ + FIRST_CONFIG=$(KUBECONFIG_PATH) \ + SECOND_CONFIG=$(SUBNET_KUBECONFIG_PATH) \ + chainsaw test e2e/subnet-test $(E2E_FLAGS) ##################################################################### # OS / ARCH diff --git a/cloud/annotations/annotations.go b/cloud/annotations/annotations.go index 21736009..2de0674d 100644 --- a/cloud/annotations/annotations.go +++ b/cloud/annotations/annotations.go @@ -23,14 +23,24 @@ const ( AnnLinodeLoadBalancerPreserve = "service.beta.kubernetes.io/linode-loadbalancer-preserve" AnnLinodeNodeBalancerID = "service.beta.kubernetes.io/linode-loadbalancer-nodebalancer-id" + AnnLinodeNodeBalancerType = "service.beta.kubernetes.io/linode-loadbalancer-nodebalancer-type" AnnLinodeHostnameOnlyIngress = "service.beta.kubernetes.io/linode-loadbalancer-hostname-only-ingress" AnnLinodeLoadBalancerTags = "service.beta.kubernetes.io/linode-loadbalancer-tags" AnnLinodeCloudFirewallID = "service.beta.kubernetes.io/linode-loadbalancer-firewall-id" AnnLinodeCloudFirewallACL = "service.beta.kubernetes.io/linode-loadbalancer-firewall-acl" + // AnnLinodeEnableIPv6Ingress is the annotation used to specify that a service should include both IPv4 and IPv6 + // addresses for its LoadBalancer ingress. When set to "true", both addresses will be included in the status. + AnnLinodeEnableIPv6Ingress = "service.beta.kubernetes.io/linode-loadbalancer-enable-ipv6-ingress" + AnnLinodeNodePrivateIP = "node.k8s.linode.com/private-ip" AnnLinodeHostUUID = "node.k8s.linode.com/host-uuid" AnnLinodeNodeIPSharingUpdated = "node.k8s.linode.com/ip-sharing-updated" + + NodeBalancerBackendIPv4Range = "service.beta.kubernetes.io/linode-loadbalancer-backend-ipv4-range" + + NodeBalancerBackendVPCName = "service.beta.kubernetes.io/linode-loadbalancer-backend-vpc-name" + NodeBalancerBackendSubnetName = "service.beta.kubernetes.io/linode-loadbalancer-backend-subnet-name" ) diff --git a/cloud/linode/cilium_loadbalancers.go b/cloud/linode/cilium_loadbalancers.go index 71dc5632..c22f7317 100644 --- a/cloud/linode/cilium_loadbalancers.go +++ b/cloud/linode/cilium_loadbalancers.go @@ -13,7 +13,6 @@ import ( ciliumclient "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1" slimv1 "github.com/cilium/cilium/pkg/k8s/slim/k8s/apis/meta/v1" "github.com/google/uuid" - "github.com/linode/linode-cloud-controller-manager/cloud/annotations" "github.com/linode/linodego" v1 "k8s.io/api/core/v1" k8serrors "k8s.io/apimachinery/pkg/api/errors" @@ -23,6 +22,8 @@ import ( "k8s.io/client-go/util/retry" "k8s.io/klog/v2" "k8s.io/utils/ptr" + + "github.com/linode/linode-cloud-controller-manager/cloud/annotations" ) const ( @@ -68,6 +69,7 @@ var ( "ca-central": 15, // Toronto (Canada) "us-iad": 17, // Washington, DC (USA) } + BGPNodeSelectorFlagInputLen int = 2 ) // getExistingSharedIPsInCluster determines the list of addresses to share on nodes by checking the @@ -293,10 +295,12 @@ func (l *loadbalancers) deleteSharedIP(ctx context.Context, service *v1.Service) } svcIngress := service.Status.LoadBalancer.Ingress if len(svcIngress) > 0 && ipHolder != nil { + var nodeLinodeID int + for _, ingress := range svcIngress { // delete the shared IP on the 
Linodes it's shared on for _, node := range bgpNodes { - nodeLinodeID, err := parseProviderID(node.Spec.ProviderID) + nodeLinodeID, err = parseProviderID(node.Spec.ProviderID) if err != nil { return err } @@ -520,7 +524,7 @@ func (l *loadbalancers) ensureCiliumBGPPeeringPolicy(ctx context.Context) error } } else { kv := strings.Split(Options.BGPNodeSelector, "=") - if len(kv) != 2 { + if len(kv) != BGPNodeSelectorFlagInputLen { return fmt.Errorf("invalid node selector %s", Options.BGPNodeSelector) } diff --git a/cloud/linode/cilium_loadbalancers_test.go b/cloud/linode/cilium_loadbalancers_test.go index f03bfaeb..2d3aa17b 100644 --- a/cloud/linode/cilium_loadbalancers_test.go +++ b/cloud/linode/cilium_loadbalancers_test.go @@ -1,7 +1,6 @@ package linode import ( - "context" "encoding/json" "fmt" "net" @@ -10,11 +9,18 @@ import ( k8sClient "github.com/cilium/cilium/pkg/k8s/client" fakev2alpha1 "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1/fake" "github.com/golang/mock/gomock" - "github.com/linode/linode-cloud-controller-manager/cloud/linode/client/mocks" "github.com/linode/linodego" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" + + "github.com/linode/linode-cloud-controller-manager/cloud/linode/client/mocks" +) + +const ( + clusterName string = "linodelb" + nodeSelector string = "cilium-bgp-peering=true" + dummyIP string = "45.76.101.26" ) var ( @@ -134,6 +140,7 @@ func TestCiliumCCMLoadBalancers(t *testing.T) { f: testCiliumUpdateLoadBalancerAddNodeWithNewIpHolderNamingConvention, }, } + //nolint: paralleltest // two tests use t.Setenv, which fails after t.Parallel() call for _, tc := range testCases { ctrl := gomock.NewController(t) mc := mocks.NewMockClient(ctrl) @@ -173,15 +180,19 @@ func createTestService() *v1.Service { } func addService(t *testing.T, kubeClient kubernetes.Interface, svc *v1.Service) { - _, err := kubeClient.CoreV1().Services(svc.Namespace).Create(context.TODO(), svc, metav1.CreateOptions{}) + t.Helper() + + _, err := kubeClient.CoreV1().Services(svc.Namespace).Create(t.Context(), svc, metav1.CreateOptions{}) if err != nil { t.Fatalf("failed to add Service: %v", err) } } func addNodes(t *testing.T, kubeClient kubernetes.Interface, nodes []*v1.Node) { + t.Helper() + for _, node := range nodes { - _, err := kubeClient.CoreV1().Nodes().Create(context.TODO(), node, metav1.CreateOptions{}) + _, err := kubeClient.CoreV1().Nodes().Create(t.Context(), node, metav1.CreateOptions{}) if err != nil { t.Fatalf("failed to add Node: %v", err) } @@ -199,8 +210,10 @@ func createNewIpHolderInstance() linodego.Instance { } func testNoBGPNodeLabel(t *testing.T, mc *mocks.MockClient) { + t.Helper() + Options.BGPNodeSelector = "" - Options.IpHolderSuffix = "linodelb" + Options.IpHolderSuffix = clusterName t.Setenv("BGP_PEER_PREFIX", "2600:3cef") svc := createTestService() newIpHolderInstance = createNewIpHolderInstance() @@ -212,12 +225,20 @@ func testNoBGPNodeLabel(t *testing.T, mc *mocks.MockClient) { lb := &loadbalancers{mc, zone, kubeClient, ciliumClient, ciliumLBType} filter := map[string]string{"label": fmt.Sprintf("%s-%s", ipHolderLabelPrefix, zone)} - rawFilter, _ := json.Marshal(filter) + rawFilter, err := json.Marshal(filter) + if err != nil { + t.Errorf("json marshal error: %v", err) + } + mc.EXPECT().ListInstances(gomock.Any(), linodego.NewListOptions(1, string(rawFilter))).Times(1).Return([]linodego.Instance{}, nil) filter = map[string]string{"label": 
generateClusterScopedIPHolderLinodeName(zone, Options.IpHolderSuffix)} - rawFilter, _ = json.Marshal(filter) + rawFilter, err = json.Marshal(filter) + if err != nil { + t.Errorf("json marshal error: %v", err) + } + mc.EXPECT().ListInstances(gomock.Any(), linodego.NewListOptions(1, string(rawFilter))).Times(1).Return([]linodego.Instance{}, nil) - dummySharedIP := "45.76.101.26" + dummySharedIP := dummyIP mc.EXPECT().CreateInstance(gomock.Any(), gomock.Any()).Times(1).Return(&newIpHolderInstance, nil) mc.EXPECT().GetInstanceIPAddresses(gomock.Any(), newIpHolderInstance.ID).Times(1).Return(&linodego.InstanceIPAddressResponse{ IPv4: &linodego.InstanceIPv4Response{ @@ -238,7 +259,7 @@ func testNoBGPNodeLabel(t *testing.T, mc *mocks.MockClient) { LinodeID: 33333, }).Times(1) - lbStatus, err := lb.EnsureLoadBalancer(context.TODO(), "linodelb", svc, nodes) + lbStatus, err := lb.EnsureLoadBalancer(t.Context(), clusterName, svc, nodes) if err != nil { t.Fatalf("expected a nil error, got %v", err) } @@ -248,7 +269,9 @@ func testNoBGPNodeLabel(t *testing.T, mc *mocks.MockClient) { } func testUnsupportedRegion(t *testing.T, mc *mocks.MockClient) { - Options.BGPNodeSelector = "cilium-bgp-peering=true" + t.Helper() + + Options.BGPNodeSelector = nodeSelector svc := createTestService() kubeClient, _ := k8sClient.NewFakeClientset() @@ -256,7 +279,7 @@ func testUnsupportedRegion(t *testing.T, mc *mocks.MockClient) { addService(t, kubeClient, svc) lb := &loadbalancers{mc, "us-foobar", kubeClient, ciliumClient, ciliumLBType} - lbStatus, err := lb.EnsureLoadBalancer(context.TODO(), "linodelb", svc, nodes) + lbStatus, err := lb.EnsureLoadBalancer(t.Context(), clusterName, svc, nodes) if err == nil { t.Fatal("expected not nil error") } @@ -267,7 +290,7 @@ func testUnsupportedRegion(t *testing.T, mc *mocks.MockClient) { // Use BGP custom id map t.Setenv("BGP_CUSTOM_ID_MAP", "{'us-foobar': 2}") lb = &loadbalancers{mc, zone, kubeClient, ciliumClient, ciliumLBType} - lbStatus, err = lb.EnsureLoadBalancer(context.TODO(), "linodelb", svc, nodes) + lbStatus, err = lb.EnsureLoadBalancer(t.Context(), clusterName, svc, nodes) if err == nil { t.Fatal("expected not nil error") } @@ -277,7 +300,9 @@ func testUnsupportedRegion(t *testing.T, mc *mocks.MockClient) { } func testCreateWithExistingIPHolderWithOldIpHolderNamingConvention(t *testing.T, mc *mocks.MockClient) { - Options.BGPNodeSelector = "cilium-bgp-peering=true" + t.Helper() + + Options.BGPNodeSelector = nodeSelector svc := createTestService() newIpHolderInstance = createNewIpHolderInstance() @@ -288,9 +313,12 @@ func testCreateWithExistingIPHolderWithOldIpHolderNamingConvention(t *testing.T, lb := &loadbalancers{mc, zone, kubeClient, ciliumClient, ciliumLBType} filter := map[string]string{"label": fmt.Sprintf("%s-%s", ipHolderLabelPrefix, zone)} - rawFilter, _ := json.Marshal(filter) + rawFilter, err := json.Marshal(filter) + if err != nil { + t.Errorf("json marshal error: %v", err) + } mc.EXPECT().ListInstances(gomock.Any(), linodego.NewListOptions(1, string(rawFilter))).Times(1).Return([]linodego.Instance{oldIpHolderInstance}, nil) - dummySharedIP := "45.76.101.26" + dummySharedIP := dummyIP mc.EXPECT().AddInstanceIPAddress(gomock.Any(), oldIpHolderInstance.ID, true).Times(1).Return(&linodego.InstanceIP{Address: dummySharedIP}, nil) mc.EXPECT().GetInstanceIPAddresses(gomock.Any(), oldIpHolderInstance.ID).Times(1).Return(&linodego.InstanceIPAddressResponse{ IPv4: &linodego.InstanceIPv4Response{ @@ -306,7 +334,7 @@ func 
testCreateWithExistingIPHolderWithOldIpHolderNamingConvention(t *testing.T, LinodeID: 22222, }).Times(1) - lbStatus, err := lb.EnsureLoadBalancer(context.TODO(), "linodelb", svc, nodes) + lbStatus, err := lb.EnsureLoadBalancer(t.Context(), clusterName, svc, nodes) if err != nil { t.Fatalf("expected a nil error, got %v", err) } @@ -316,8 +344,10 @@ func testCreateWithExistingIPHolderWithOldIpHolderNamingConvention(t *testing.T, } func testCreateWithExistingIPHolderWithNewIpHolderNamingConvention(t *testing.T, mc *mocks.MockClient) { - Options.BGPNodeSelector = "cilium-bgp-peering=true" - Options.IpHolderSuffix = "linodelb" + t.Helper() + + Options.BGPNodeSelector = nodeSelector + Options.IpHolderSuffix = clusterName svc := createTestService() newIpHolderInstance = createNewIpHolderInstance() @@ -328,9 +358,12 @@ func testCreateWithExistingIPHolderWithNewIpHolderNamingConvention(t *testing.T, lb := &loadbalancers{mc, zone, kubeClient, ciliumClient, ciliumLBType} filter := map[string]string{"label": fmt.Sprintf("%s-%s", ipHolderLabelPrefix, zone)} - rawFilter, _ := json.Marshal(filter) + rawFilter, err := json.Marshal(filter) + if err != nil { + t.Errorf("json marshal error: %v", err) + } mc.EXPECT().ListInstances(gomock.Any(), linodego.NewListOptions(1, string(rawFilter))).Times(1).Return([]linodego.Instance{oldIpHolderInstance}, nil) - dummySharedIP := "45.76.101.26" + dummySharedIP := dummyIP mc.EXPECT().AddInstanceIPAddress(gomock.Any(), oldIpHolderInstance.ID, true).Times(1).Return(&linodego.InstanceIP{Address: dummySharedIP}, nil) mc.EXPECT().GetInstanceIPAddresses(gomock.Any(), oldIpHolderInstance.ID).Times(1).Return(&linodego.InstanceIPAddressResponse{ IPv4: &linodego.InstanceIPv4Response{ @@ -346,7 +379,7 @@ func testCreateWithExistingIPHolderWithNewIpHolderNamingConvention(t *testing.T, LinodeID: 22222, }).Times(1) - lbStatus, err := lb.EnsureLoadBalancer(context.TODO(), "linodelb", svc, nodes) + lbStatus, err := lb.EnsureLoadBalancer(t.Context(), clusterName, svc, nodes) if err != nil { t.Fatalf("expected a nil error, got %v", err) } @@ -356,7 +389,9 @@ func testCreateWithExistingIPHolderWithNewIpHolderNamingConvention(t *testing.T, } func testCreateWithExistingIPHolderWithNewIpHolderNamingConventionUsingLongSuffix(t *testing.T, mc *mocks.MockClient) { - Options.BGPNodeSelector = "cilium-bgp-peering=true" + t.Helper() + + Options.BGPNodeSelector = nodeSelector Options.IpHolderSuffix = "OaTJrRuufacHVougjwkpBpmstiqvswvBNEMWXsRYfMBTCkKIUTXpbGIcIbDWSQp" svc := createTestService() newIpHolderInstance = createNewIpHolderInstance() @@ -368,9 +403,12 @@ func testCreateWithExistingIPHolderWithNewIpHolderNamingConventionUsingLongSuffi lb := &loadbalancers{mc, zone, kubeClient, ciliumClient, ciliumLBType} filter := map[string]string{"label": fmt.Sprintf("%s-%s", ipHolderLabelPrefix, zone)} - rawFilter, _ := json.Marshal(filter) + rawFilter, err := json.Marshal(filter) + if err != nil { + t.Errorf("json marshal error: %v", err) + } mc.EXPECT().ListInstances(gomock.Any(), linodego.NewListOptions(1, string(rawFilter))).Times(1).Return([]linodego.Instance{oldIpHolderInstance}, nil) - dummySharedIP := "45.76.101.26" + dummySharedIP := dummyIP mc.EXPECT().AddInstanceIPAddress(gomock.Any(), oldIpHolderInstance.ID, true).Times(1).Return(&linodego.InstanceIP{Address: dummySharedIP}, nil) mc.EXPECT().GetInstanceIPAddresses(gomock.Any(), oldIpHolderInstance.ID).Times(1).Return(&linodego.InstanceIPAddressResponse{ IPv4: &linodego.InstanceIPv4Response{ @@ -386,7 +424,7 @@ func 
testCreateWithExistingIPHolderWithNewIpHolderNamingConventionUsingLongSuffi LinodeID: 22222, }).Times(1) - lbStatus, err := lb.EnsureLoadBalancer(context.TODO(), "linodelb", svc, nodes) + lbStatus, err := lb.EnsureLoadBalancer(t.Context(), clusterName, svc, nodes) if err != nil { t.Fatalf("expected a nil error, got %v", err) } @@ -396,7 +434,9 @@ func testCreateWithExistingIPHolderWithNewIpHolderNamingConventionUsingLongSuffi } func testCreateWithNoExistingIPHolderUsingNoSuffix(t *testing.T, mc *mocks.MockClient) { - Options.BGPNodeSelector = "cilium-bgp-peering=true" + t.Helper() + + Options.BGPNodeSelector = nodeSelector Options.IpHolderSuffix = "" svc := createTestService() newIpHolderInstance = createNewIpHolderInstance() @@ -408,12 +448,18 @@ func testCreateWithNoExistingIPHolderUsingNoSuffix(t *testing.T, mc *mocks.MockC lb := &loadbalancers{mc, zone, kubeClient, ciliumClient, ciliumLBType} filter := map[string]string{"label": fmt.Sprintf("%s-%s", ipHolderLabelPrefix, zone)} - rawFilter, _ := json.Marshal(filter) + rawFilter, err := json.Marshal(filter) + if err != nil { + t.Errorf("json marshal error: %v", err) + } mc.EXPECT().ListInstances(gomock.Any(), linodego.NewListOptions(1, string(rawFilter))).Times(1).Return([]linodego.Instance{}, nil) filter = map[string]string{"label": generateClusterScopedIPHolderLinodeName(zone, Options.IpHolderSuffix)} - rawFilter, _ = json.Marshal(filter) + rawFilter, err = json.Marshal(filter) + if err != nil { + t.Errorf("json marshal error: %v", err) + } mc.EXPECT().ListInstances(gomock.Any(), linodego.NewListOptions(1, string(rawFilter))).Times(1).Return([]linodego.Instance{}, nil) - dummySharedIP := "45.76.101.26" + dummySharedIP := dummyIP mc.EXPECT().CreateInstance(gomock.Any(), gomock.Any()).Times(1).Return(&newIpHolderInstance, nil) mc.EXPECT().GetInstanceIPAddresses(gomock.Any(), newIpHolderInstance.ID).Times(1).Return(&linodego.InstanceIPAddressResponse{ IPv4: &linodego.InstanceIPv4Response{ @@ -430,7 +476,7 @@ func testCreateWithNoExistingIPHolderUsingNoSuffix(t *testing.T, mc *mocks.MockC LinodeID: 22222, }).Times(1) - lbStatus, err := lb.EnsureLoadBalancer(context.TODO(), "linodelb", svc, nodes) + lbStatus, err := lb.EnsureLoadBalancer(t.Context(), clusterName, svc, nodes) if err != nil { t.Fatalf("expected a nil error, got %v", err) } @@ -440,8 +486,10 @@ func testCreateWithNoExistingIPHolderUsingNoSuffix(t *testing.T, mc *mocks.MockC } func testCreateWithNoExistingIPHolderUsingShortSuffix(t *testing.T, mc *mocks.MockClient) { - Options.BGPNodeSelector = "cilium-bgp-peering=true" - Options.IpHolderSuffix = "linodelb" + t.Helper() + + Options.BGPNodeSelector = nodeSelector + Options.IpHolderSuffix = clusterName svc := createTestService() newIpHolderInstance = createNewIpHolderInstance() @@ -452,12 +500,18 @@ func testCreateWithNoExistingIPHolderUsingShortSuffix(t *testing.T, mc *mocks.Mo lb := &loadbalancers{mc, zone, kubeClient, ciliumClient, ciliumLBType} filter := map[string]string{"label": fmt.Sprintf("%s-%s", ipHolderLabelPrefix, zone)} - rawFilter, _ := json.Marshal(filter) + rawFilter, err := json.Marshal(filter) + if err != nil { + t.Errorf("json marshal error: %v", err) + } mc.EXPECT().ListInstances(gomock.Any(), linodego.NewListOptions(1, string(rawFilter))).Times(1).Return([]linodego.Instance{}, nil) filter = map[string]string{"label": generateClusterScopedIPHolderLinodeName(zone, Options.IpHolderSuffix)} - rawFilter, _ = json.Marshal(filter) + rawFilter, err = json.Marshal(filter) + if err != nil { + t.Errorf("json marshal 
error: %v", err) + } mc.EXPECT().ListInstances(gomock.Any(), linodego.NewListOptions(1, string(rawFilter))).Times(1).Return([]linodego.Instance{}, nil) - dummySharedIP := "45.76.101.26" + dummySharedIP := dummyIP mc.EXPECT().CreateInstance(gomock.Any(), gomock.Any()).Times(1).Return(&newIpHolderInstance, nil) mc.EXPECT().GetInstanceIPAddresses(gomock.Any(), newIpHolderInstance.ID).Times(1).Return(&linodego.InstanceIPAddressResponse{ IPv4: &linodego.InstanceIPv4Response{ @@ -474,7 +528,7 @@ func testCreateWithNoExistingIPHolderUsingShortSuffix(t *testing.T, mc *mocks.Mo LinodeID: 22222, }).Times(1) - lbStatus, err := lb.EnsureLoadBalancer(context.TODO(), "linodelb", svc, nodes) + lbStatus, err := lb.EnsureLoadBalancer(t.Context(), clusterName, svc, nodes) if err != nil { t.Fatalf("expected a nil error, got %v", err) } @@ -484,7 +538,9 @@ func testCreateWithNoExistingIPHolderUsingShortSuffix(t *testing.T, mc *mocks.Mo } func testCreateWithNoExistingIPHolderUsingLongSuffix(t *testing.T, mc *mocks.MockClient) { - Options.BGPNodeSelector = "cilium-bgp-peering=true" + t.Helper() + + Options.BGPNodeSelector = nodeSelector Options.IpHolderSuffix = "OaTJrRuufacHVougjwkpBpmstiqvswvBNEMWXsRYfMBTCkKIUTXpbGIcIbDWSQp" svc := createTestService() newIpHolderInstance = createNewIpHolderInstance() @@ -496,12 +552,18 @@ func testCreateWithNoExistingIPHolderUsingLongSuffix(t *testing.T, mc *mocks.Moc lb := &loadbalancers{mc, zone, kubeClient, ciliumClient, ciliumLBType} filter := map[string]string{"label": fmt.Sprintf("%s-%s", ipHolderLabelPrefix, zone)} - rawFilter, _ := json.Marshal(filter) + rawFilter, err := json.Marshal(filter) + if err != nil { + t.Errorf("json marshal error: %v", err) + } mc.EXPECT().ListInstances(gomock.Any(), linodego.NewListOptions(1, string(rawFilter))).Times(1).Return([]linodego.Instance{}, nil) filter = map[string]string{"label": generateClusterScopedIPHolderLinodeName(zone, Options.IpHolderSuffix)} - rawFilter, _ = json.Marshal(filter) + rawFilter, err = json.Marshal(filter) + if err != nil { + t.Errorf("json marshal error: %v", err) + } mc.EXPECT().ListInstances(gomock.Any(), linodego.NewListOptions(1, string(rawFilter))).Times(1).Return([]linodego.Instance{}, nil) - dummySharedIP := "45.76.101.26" + dummySharedIP := dummyIP mc.EXPECT().CreateInstance(gomock.Any(), gomock.Any()).Times(1).Return(&newIpHolderInstance, nil) mc.EXPECT().GetInstanceIPAddresses(gomock.Any(), newIpHolderInstance.ID).Times(1).Return(&linodego.InstanceIPAddressResponse{ IPv4: &linodego.InstanceIPv4Response{ @@ -518,7 +580,7 @@ func testCreateWithNoExistingIPHolderUsingLongSuffix(t *testing.T, mc *mocks.Moc LinodeID: 22222, }).Times(1) - lbStatus, err := lb.EnsureLoadBalancer(context.TODO(), "linodelb", svc, nodes) + lbStatus, err := lb.EnsureLoadBalancer(t.Context(), clusterName, svc, nodes) if err != nil { t.Fatalf("expected a nil error, got %v", err) } @@ -528,7 +590,9 @@ func testCreateWithNoExistingIPHolderUsingLongSuffix(t *testing.T, mc *mocks.Moc } func testEnsureCiliumLoadBalancerDeletedWithOldIpHolderNamingConvention(t *testing.T, mc *mocks.MockClient) { - Options.BGPNodeSelector = "cilium-bgp-peering=true" + t.Helper() + + Options.BGPNodeSelector = nodeSelector svc := createTestService() kubeClient, _ := k8sClient.NewFakeClientset() @@ -537,25 +601,30 @@ func testEnsureCiliumLoadBalancerDeletedWithOldIpHolderNamingConvention(t *testi addNodes(t, kubeClient, nodes) lb := &loadbalancers{mc, zone, kubeClient, ciliumClient, ciliumLBType} - dummySharedIP := "45.76.101.26" + dummySharedIP := dummyIP 
svc.Status.LoadBalancer = v1.LoadBalancerStatus{Ingress: []v1.LoadBalancerIngress{{IP: dummySharedIP}}} filter := map[string]string{"label": fmt.Sprintf("%s-%s", ipHolderLabelPrefix, zone)} - rawFilter, _ := json.Marshal(filter) + rawFilter, err := json.Marshal(filter) + if err != nil { + t.Errorf("json marshal error: %v", err) + } mc.EXPECT().ListInstances(gomock.Any(), linodego.NewListOptions(1, string(rawFilter))).Times(1).Return([]linodego.Instance{oldIpHolderInstance}, nil) mc.EXPECT().DeleteInstanceIPAddress(gomock.Any(), 11111, dummySharedIP).Times(1).Return(nil) mc.EXPECT().DeleteInstanceIPAddress(gomock.Any(), 22222, dummySharedIP).Times(1).Return(nil) mc.EXPECT().DeleteInstanceIPAddress(gomock.Any(), oldIpHolderInstance.ID, dummySharedIP).Times(1).Return(nil) - err := lb.EnsureLoadBalancerDeleted(context.TODO(), "linodelb", svc) + err = lb.EnsureLoadBalancerDeleted(t.Context(), clusterName, svc) if err != nil { t.Fatalf("expected a nil error, got %v", err) } } func testEnsureCiliumLoadBalancerDeletedWithNewIpHolderNamingConvention(t *testing.T, mc *mocks.MockClient) { - Options.BGPNodeSelector = "cilium-bgp-peering=true" - Options.IpHolderSuffix = "linodelb" + t.Helper() + + Options.BGPNodeSelector = nodeSelector + Options.IpHolderSuffix = clusterName svc := createTestService() newIpHolderInstance = createNewIpHolderInstance() @@ -565,27 +634,35 @@ func testEnsureCiliumLoadBalancerDeletedWithNewIpHolderNamingConvention(t *testi addNodes(t, kubeClient, nodes) lb := &loadbalancers{mc, zone, kubeClient, ciliumClient, ciliumLBType} - dummySharedIP := "45.76.101.26" + dummySharedIP := dummyIP svc.Status.LoadBalancer = v1.LoadBalancerStatus{Ingress: []v1.LoadBalancerIngress{{IP: dummySharedIP}}} filter := map[string]string{"label": fmt.Sprintf("%s-%s", ipHolderLabelPrefix, zone)} - rawFilter, _ := json.Marshal(filter) + rawFilter, err := json.Marshal(filter) + if err != nil { + t.Errorf("json marshal error: %v", err) + } mc.EXPECT().ListInstances(gomock.Any(), linodego.NewListOptions(1, string(rawFilter))).Times(1).Return([]linodego.Instance{}, nil) filter = map[string]string{"label": generateClusterScopedIPHolderLinodeName(zone, Options.IpHolderSuffix)} - rawFilter, _ = json.Marshal(filter) + rawFilter, err = json.Marshal(filter) + if err != nil { + t.Errorf("json marshal error: %v", err) + } mc.EXPECT().ListInstances(gomock.Any(), linodego.NewListOptions(1, string(rawFilter))).Times(1).Return([]linodego.Instance{newIpHolderInstance}, nil) mc.EXPECT().DeleteInstanceIPAddress(gomock.Any(), 11111, dummySharedIP).Times(1).Return(nil) mc.EXPECT().DeleteInstanceIPAddress(gomock.Any(), 22222, dummySharedIP).Times(1).Return(nil) mc.EXPECT().DeleteInstanceIPAddress(gomock.Any(), newIpHolderInstance.ID, dummySharedIP).Times(1).Return(nil) - err := lb.EnsureLoadBalancerDeleted(context.TODO(), "linodelb", svc) + err = lb.EnsureLoadBalancerDeleted(t.Context(), clusterName, svc) if err != nil { t.Fatalf("expected a nil error, got %v", err) } } func testCiliumUpdateLoadBalancerAddNodeWithOldIpHolderNamingConvention(t *testing.T, mc *mocks.MockClient) { - Options.BGPNodeSelector = "cilium-bgp-peering=true" + t.Helper() + + Options.BGPNodeSelector = nodeSelector svc := createTestService() kubeClient, _ := k8sClient.NewFakeClientset() @@ -595,9 +672,12 @@ func testCiliumUpdateLoadBalancerAddNodeWithOldIpHolderNamingConvention(t *testi lb := &loadbalancers{mc, zone, kubeClient, ciliumClient, ciliumLBType} filter := map[string]string{"label": fmt.Sprintf("%s-%s", ipHolderLabelPrefix, zone)} - rawFilter, 
_ := json.Marshal(filter) + rawFilter, err := json.Marshal(filter) + if err != nil { + t.Errorf("json marshal error: %v", err) + } mc.EXPECT().ListInstances(gomock.Any(), linodego.NewListOptions(1, string(rawFilter))).Times(1).Return([]linodego.Instance{oldIpHolderInstance}, nil) - dummySharedIP := "45.76.101.26" + dummySharedIP := dummyIP mc.EXPECT().AddInstanceIPAddress(gomock.Any(), oldIpHolderInstance.ID, true).Times(1).Return(&linodego.InstanceIP{Address: dummySharedIP}, nil) mc.EXPECT().GetInstanceIPAddresses(gomock.Any(), oldIpHolderInstance.ID).Times(1).Return(&linodego.InstanceIPAddressResponse{ IPv4: &linodego.InstanceIPv4Response{ @@ -613,7 +693,7 @@ func testCiliumUpdateLoadBalancerAddNodeWithOldIpHolderNamingConvention(t *testi LinodeID: 22222, }).Times(1) - lbStatus, err := lb.EnsureLoadBalancer(context.TODO(), "linodelb", svc, nodes) + lbStatus, err := lb.EnsureLoadBalancer(t.Context(), clusterName, svc, nodes) if err != nil { t.Fatalf("expected a nil error, got %v", err) } @@ -634,15 +714,17 @@ func testCiliumUpdateLoadBalancerAddNodeWithOldIpHolderNamingConvention(t *testi }).Times(1) addNodes(t, kubeClient, additionalNodes) - err = lb.UpdateLoadBalancer(context.TODO(), "linodelb", svc, additionalNodes) + err = lb.UpdateLoadBalancer(t.Context(), clusterName, svc, additionalNodes) if err != nil { t.Fatalf("expected a nil error, got %v", err) } } func testCiliumUpdateLoadBalancerAddNodeWithNewIpHolderNamingConvention(t *testing.T, mc *mocks.MockClient) { - Options.BGPNodeSelector = "cilium-bgp-peering=true" - Options.IpHolderSuffix = "linodelb" + t.Helper() + + Options.BGPNodeSelector = nodeSelector + Options.IpHolderSuffix = clusterName svc := createTestService() newIpHolderInstance = createNewIpHolderInstance() @@ -653,12 +735,18 @@ func testCiliumUpdateLoadBalancerAddNodeWithNewIpHolderNamingConvention(t *testi lb := &loadbalancers{mc, zone, kubeClient, ciliumClient, ciliumLBType} filter := map[string]string{"label": fmt.Sprintf("%s-%s", ipHolderLabelPrefix, zone)} - rawFilter, _ := json.Marshal(filter) + rawFilter, err := json.Marshal(filter) + if err != nil { + t.Errorf("json marshal error: %v", err) + } mc.EXPECT().ListInstances(gomock.Any(), linodego.NewListOptions(1, string(rawFilter))).Times(1).Return([]linodego.Instance{}, nil) filter = map[string]string{"label": generateClusterScopedIPHolderLinodeName(zone, Options.IpHolderSuffix)} - rawFilter, _ = json.Marshal(filter) + rawFilter, err = json.Marshal(filter) + if err != nil { + t.Errorf("json marshal error: %v", err) + } mc.EXPECT().ListInstances(gomock.Any(), linodego.NewListOptions(1, string(rawFilter))).Times(1).Return([]linodego.Instance{newIpHolderInstance}, nil) - dummySharedIP := "45.76.101.26" + dummySharedIP := dummyIP mc.EXPECT().AddInstanceIPAddress(gomock.Any(), newIpHolderInstance.ID, true).Times(1).Return(&linodego.InstanceIP{Address: dummySharedIP}, nil) mc.EXPECT().GetInstanceIPAddresses(gomock.Any(), newIpHolderInstance.ID).Times(1).Return(&linodego.InstanceIPAddressResponse{ IPv4: &linodego.InstanceIPv4Response{ @@ -674,7 +762,7 @@ func testCiliumUpdateLoadBalancerAddNodeWithNewIpHolderNamingConvention(t *testi LinodeID: 22222, }).Times(1) - lbStatus, err := lb.EnsureLoadBalancer(context.TODO(), "linodelb", svc, nodes) + lbStatus, err := lb.EnsureLoadBalancer(t.Context(), clusterName, svc, nodes) if err != nil { t.Fatalf("expected a nil error, got %v", err) } @@ -684,10 +772,16 @@ func testCiliumUpdateLoadBalancerAddNodeWithNewIpHolderNamingConvention(t *testi // Now add another node to the 
cluster and assert that it gets the shared IP filter = map[string]string{"label": fmt.Sprintf("%s-%s", ipHolderLabelPrefix, zone)} - rawFilter, _ = json.Marshal(filter) + rawFilter, err = json.Marshal(filter) + if err != nil { + t.Errorf("json marshal error: %v", err) + } mc.EXPECT().ListInstances(gomock.Any(), linodego.NewListOptions(1, string(rawFilter))).Times(1).Return([]linodego.Instance{}, nil) filter = map[string]string{"label": generateClusterScopedIPHolderLinodeName(zone, Options.IpHolderSuffix)} - rawFilter, _ = json.Marshal(filter) + rawFilter, err = json.Marshal(filter) + if err != nil { + t.Errorf("json marshal error: %v", err) + } mc.EXPECT().ListInstances(gomock.Any(), linodego.NewListOptions(1, string(rawFilter))).Times(1).Return([]linodego.Instance{newIpHolderInstance}, nil) mc.EXPECT().GetInstanceIPAddresses(gomock.Any(), newIpHolderInstance.ID).Times(1).Return(&linodego.InstanceIPAddressResponse{ @@ -701,7 +795,7 @@ func testCiliumUpdateLoadBalancerAddNodeWithNewIpHolderNamingConvention(t *testi }).Times(1) addNodes(t, kubeClient, additionalNodes) - err = lb.UpdateLoadBalancer(context.TODO(), "linodelb", svc, additionalNodes) + err = lb.UpdateLoadBalancer(t.Context(), clusterName, svc, additionalNodes) if err != nil { t.Fatalf("expected a nil error, got %v", err) } diff --git a/cloud/linode/client/client.go b/cloud/linode/client/client.go index 6599839b..609f88e1 100644 --- a/cloud/linode/client/client.go +++ b/cloud/linode/client/client.go @@ -11,9 +11,10 @@ import ( "os" "time" - _ "github.com/hexdigest/gowrap" "github.com/linode/linodego" "k8s.io/klog/v2" + + _ "github.com/hexdigest/gowrap" ) const ( @@ -35,6 +36,7 @@ type Client interface { ListVPCs(context.Context, *linodego.ListOptions) ([]linodego.VPC, error) ListVPCIPAddresses(context.Context, int, *linodego.ListOptions) ([]linodego.VPCIP, error) + ListVPCSubnets(context.Context, int, *linodego.ListOptions) ([]linodego.VPCSubnet, error) CreateNodeBalancer(context.Context, linodego.NodeBalancerCreateOptions) (*linodego.NodeBalancer, error) GetNodeBalancer(context.Context, int) (*linodego.NodeBalancer, error) diff --git a/cloud/linode/client/client_with_metrics.go b/cloud/linode/client/client_with_metrics.go index d87e0bbd..4812e296 100644 --- a/cloud/linode/client/client_with_metrics.go +++ b/cloud/linode/client/client_with_metrics.go @@ -332,6 +332,19 @@ func (_d ClientWithPrometheus) ListVPCIPAddresses(ctx context.Context, i1 int, l return _d.base.ListVPCIPAddresses(ctx, i1, lp1) } +// ListVPCSubnets implements Client +func (_d ClientWithPrometheus) ListVPCSubnets(ctx context.Context, i1 int, lp1 *linodego.ListOptions) (va1 []linodego.VPCSubnet, err error) { + defer func() { + result := "ok" + if err != nil { + result = "error" + } + + ClientMethodCounterVec.WithLabelValues("ListVPCSubnets", result).Inc() + }() + return _d.base.ListVPCSubnets(ctx, i1, lp1) +} + // ListVPCs implements Client func (_d ClientWithPrometheus) ListVPCs(ctx context.Context, lp1 *linodego.ListOptions) (va1 []linodego.VPC, err error) { defer func() { diff --git a/cloud/linode/client/mocks/mock_client.go b/cloud/linode/client/mocks/mock_client.go index c986aef2..bea9ea02 100644 --- a/cloud/linode/client/mocks/mock_client.go +++ b/cloud/linode/client/mocks/mock_client.go @@ -375,6 +375,21 @@ func (mr *MockClientMockRecorder) ListVPCIPAddresses(arg0, arg1, arg2 interface{ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListVPCIPAddresses", reflect.TypeOf((*MockClient)(nil).ListVPCIPAddresses), arg0, arg1, arg2) } +// ListVPCSubnets 
mocks base method. +func (m *MockClient) ListVPCSubnets(arg0 context.Context, arg1 int, arg2 *linodego.ListOptions) ([]linodego.VPCSubnet, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListVPCSubnets", arg0, arg1, arg2) + ret0, _ := ret[0].([]linodego.VPCSubnet) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListVPCSubnets indicates an expected call of ListVPCSubnets. +func (mr *MockClientMockRecorder) ListVPCSubnets(arg0, arg1, arg2 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListVPCSubnets", reflect.TypeOf((*MockClient)(nil).ListVPCSubnets), arg0, arg1, arg2) +} + // ListVPCs mocks base method. func (m *MockClient) ListVPCs(arg0 context.Context, arg1 *linodego.ListOptions) ([]linodego.VPC, error) { m.ctrl.T.Helper() diff --git a/cloud/linode/cloud.go b/cloud/linode/cloud.go index 8ed3a18e..82747537 100644 --- a/cloud/linode/cloud.go +++ b/cloud/linode/cloud.go @@ -39,14 +39,18 @@ var Options struct { EnableRouteController bool EnableTokenHealthChecker bool // Deprecated: use VPCNames instead - VPCName string - VPCNames string - LoadBalancerType string - BGPNodeSelector string - IpHolderSuffix string - LinodeExternalNetwork *net.IPNet - NodeBalancerTags []string - GlobalStopChannel chan<- struct{} + VPCName string + VPCNames string + SubnetNames string + LoadBalancerType string + BGPNodeSelector string + IpHolderSuffix string + LinodeExternalNetwork *net.IPNet + NodeBalancerTags []string + DefaultNBType string + NodeBalancerBackendIPv4Subnet string + GlobalStopChannel chan<- struct{} + EnableIPv6ForLoadBalancers bool } type linodeCloud struct { @@ -57,7 +61,10 @@ type linodeCloud struct { linodeTokenHealthChecker *healthChecker } -var instanceCache *instances +var ( + instanceCache *instances + ipHolderCharLimit int = 23 +) func init() { registerMetrics() @@ -73,7 +80,7 @@ func init() { func newLinodeClientWithPrometheus(apiToken string, timeout time.Duration) (client.Client, error) { linodeClient, err := client.New(apiToken, timeout) if err != nil { - return nil, fmt.Errorf("client was not created succesfully: %w", err) + return nil, fmt.Errorf("client was not created successfully: %w", err) } if Options.LinodeGoDebug { @@ -111,7 +118,8 @@ func newCloud() (cloudprovider.Interface, error) { var healthChecker *healthChecker if Options.EnableTokenHealthChecker { - authenticated, err := client.CheckClientAuthenticated(context.TODO(), linodeClient) + var authenticated bool + authenticated, err = client.CheckClientAuthenticated(context.TODO(), linodeClient) if err != nil { return nil, fmt.Errorf("linode client authenticated connection error: %w", err) } @@ -132,6 +140,12 @@ func newCloud() (cloudprovider.Interface, error) { Options.VPCNames = Options.VPCName } + // SubnetNames can't be used without VPCNames also being set + if Options.SubnetNames != "" && Options.VPCNames == "" { + klog.Warningf("failed to set flag subnet-names: vpc-names must be set to a non-empty value") + Options.SubnetNames = "" + } + instanceCache = newInstances(linodeClient) routes, err := newRoutes(linodeClient, instanceCache) if err != nil { @@ -150,8 +164,8 @@ func newCloud() (cloudprovider.Interface, error) { klog.Infof("Using IP holder suffix '%s'\n", Options.IpHolderSuffix) } - if len(Options.IpHolderSuffix) > 23 { - msg := fmt.Sprintf("ip-holder-suffix must be 23 characters or less: %s is %d characters\n", Options.IpHolderSuffix, len(Options.IpHolderSuffix)) + if len(Options.IpHolderSuffix) > ipHolderCharLimit { + msg := 
fmt.Sprintf("ip-holder-suffix must be %d characters or less: %s is %d characters\n", ipHolderCharLimit, Options.IpHolderSuffix, len(Options.IpHolderSuffix)) klog.Error(msg) return nil, fmt.Errorf("%s", msg) } @@ -177,7 +191,12 @@ func (c *linodeCloud) Initialize(clientBuilder cloudprovider.ControllerClientBui go c.linodeTokenHealthChecker.Run(stopCh) } - serviceController := newServiceController(c.loadbalancers.(*loadbalancers), serviceInformer) + lb, assertion := c.loadbalancers.(*loadbalancers) + if !assertion { + klog.Error("type assertion during Initialize() failed") + return + } + serviceController := newServiceController(lb, serviceInformer) go serviceController.Run(stopCh) nodeController := newNodeController(kubeclient, c.client, nodeInformer, instanceCache) diff --git a/cloud/linode/cloud_test.go b/cloud/linode/cloud_test.go index c6f2c97d..618250fc 100644 --- a/cloud/linode/cloud_test.go +++ b/cloud/linode/cloud_test.go @@ -6,9 +6,11 @@ import ( "testing" "github.com/golang/mock/gomock" - "github.com/linode/linode-cloud-controller-manager/cloud/linode/client/mocks" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" cloudprovider "k8s.io/cloud-provider" + + "github.com/linode/linode-cloud-controller-manager/cloud/linode/client/mocks" ) func TestNewCloudRouteControllerDisabled(t *testing.T) { @@ -74,8 +76,8 @@ func TestNewCloud(t *testing.T) { Options.VPCNames = "" }() _, err := newCloud() - assert.NoError(t, err, "expected no error if deprecated flag vpcname is set") - assert.Equal(t, Options.VPCNames, "tt", "expected vpcnames to be set to vpcname") + require.NoError(t, err, "expected no error if deprecated flag vpcname is set") + assert.Equal(t, "tt", Options.VPCNames, "expected vpcnames to be set to vpcname") }) t.Run("should fail if incorrect loadbalancertype is set", func(t *testing.T) { diff --git a/cloud/linode/fake_linode_test.go b/cloud/linode/fake_linode_test.go index aeb069d8..47e990c6 100644 --- a/cloud/linode/fake_linode_test.go +++ b/cloud/linode/fake_linode_test.go @@ -19,12 +19,15 @@ import ( const apiVersion = "v4" type fakeAPI struct { - t *testing.T - nb map[string]*linodego.NodeBalancer - nbc map[string]*linodego.NodeBalancerConfig - nbn map[string]*linodego.NodeBalancerNode - fw map[int]*linodego.Firewall // map of firewallID -> firewall - fwd map[int]map[int]*linodego.FirewallDevice // map of firewallID -> firewallDeviceID:FirewallDevice + t *testing.T + nb map[string]*linodego.NodeBalancer + nbc map[string]*linodego.NodeBalancerConfig + nbn map[string]*linodego.NodeBalancerNode + fw map[int]*linodego.Firewall // map of firewallID -> firewall + fwd map[int]map[int]*linodego.FirewallDevice // map of firewallID -> firewallDeviceID:FirewallDevice + nbvpcc map[string]*linodego.NodeBalancerVPCConfig + vpc map[int]*linodego.VPC + subnet map[int]*linodego.VPCSubnet requests map[fakeRequest]struct{} mux *http.ServeMux @@ -37,6 +40,8 @@ type fakeRequest struct { } func newFake(t *testing.T) *fakeAPI { + t.Helper() + fake := &fakeAPI{ t: t, nb: make(map[string]*linodego.NodeBalancer), @@ -44,6 +49,9 @@ func newFake(t *testing.T) *fakeAPI { nbn: make(map[string]*linodego.NodeBalancerNode), fw: make(map[int]*linodego.Firewall), fwd: make(map[int]map[int]*linodego.FirewallDevice), + nbvpcc: make(map[string]*linodego.NodeBalancerVPCConfig), + vpc: make(map[int]*linodego.VPC), + subnet: make(map[int]*linodego.VPCSubnet), requests: make(map[fakeRequest]struct{}), mux: http.NewServeMux(), } @@ -117,10 +125,58 @@ func (f *fakeAPI) setupRoutes() { _, _ = 
w.Write(rr) }) + f.mux.HandleFunc("GET /v4/vpcs", func(w http.ResponseWriter, r *http.Request) { + res := 0 + data := []linodego.VPC{} + filter := r.Header.Get("X-Filter") + if filter == "" { + for _, v := range f.vpc { + data = append(data, *v) + } + } else { + var fs map[string]string + err := json.Unmarshal([]byte(filter), &fs) + if err != nil { + f.t.Fatal(err) + } + for _, v := range f.vpc { + if v.Label != "" && fs["label"] != "" && v.Label == fs["label"] { + data = append(data, *v) + } + } + } + + resp := paginatedResponse[linodego.VPC]{ + Page: 1, + Pages: 1, + Results: res, + Data: data, + } + rr, _ := json.Marshal(resp) + _, _ = w.Write(rr) + }) + + f.mux.HandleFunc("GET /v4/vpcs/{vpcId}/subnets", func(w http.ResponseWriter, r *http.Request) { + res := 0 + vpcID, err := strconv.Atoi(r.PathValue("vpcId")) + if err != nil { + f.t.Fatal(err) + } + + resp := paginatedResponse[linodego.VPCSubnet]{ + Page: 1, + Pages: 1, + Results: res, + Data: f.vpc[vpcID].Subnets, + } + rr, _ := json.Marshal(resp) + _, _ = w.Write(rr) + }) + f.mux.HandleFunc("GET /v4/nodebalancers/{nodeBalancerId}", func(w http.ResponseWriter, r *http.Request) { nb, found := f.nb[r.PathValue("nodeBalancerId")] if !found { - w.WriteHeader(404) + w.WriteHeader(http.StatusNotFound) resp := linodego.APIError{ Errors: []linodego.APIErrorReason{ {Reason: "Not Found"}, @@ -230,7 +286,7 @@ func (f *fakeAPI) setupRoutes() { firewallDevices, found := f.fwd[fwdId] if !found { - w.WriteHeader(404) + w.WriteHeader(http.StatusNotFound) resp := linodego.APIError{ Errors: []linodego.APIErrorReason{ {Reason: "Not Found"}, @@ -462,6 +518,54 @@ func (f *fakeAPI) setupRoutes() { _, _ = w.Write(resp) }) + f.mux.HandleFunc("POST /v4/vpcs", func(w http.ResponseWriter, r *http.Request) { + vco := linodego.VPCCreateOptions{} + if err := json.NewDecoder(r.Body).Decode(&vco); err != nil { + f.t.Fatal(err) + } + + subnets := []linodego.VPCSubnet{} + for _, s := range vco.Subnets { + subnet := linodego.VPCSubnet{ + ID: rand.Intn(9999), + IPv4: s.IPv4, + Label: s.Label, + } + subnets = append(subnets, subnet) + f.subnet[subnet.ID] = &subnet + } + vpc := linodego.VPC{ + ID: rand.Intn(9999), + Label: vco.Label, + Description: vco.Description, + Region: vco.Region, + Subnets: subnets, + } + + f.vpc[vpc.ID] = &vpc + resp, err := json.Marshal(vpc) + if err != nil { + f.t.Fatal(err) + } + _, _ = w.Write(resp) + }) + + f.mux.HandleFunc("DELETE /v4/vpcs/{vpcId}", func(w http.ResponseWriter, r *http.Request) { + vpcid, err := strconv.Atoi(r.PathValue("vpcId")) + if err != nil { + f.t.Fatal(err) + } + + for k, v := range f.vpc { + if v.ID == vpcid { + for _, s := range v.Subnets { + delete(f.subnet, s.ID) + } + delete(f.vpc, k) + } + } + }) + f.mux.HandleFunc("POST /v4/networking/firewalls/{firewallId}/devices", func(w http.ResponseWriter, r *http.Request) { fdco := linodego.FirewallDeviceCreateOptions{} if err := json.NewDecoder(r.Body).Decode(&fdco); err != nil { @@ -626,7 +730,7 @@ func (f *fakeAPI) setupRoutes() { return } - w.WriteHeader(404) + w.WriteHeader(http.StatusNotFound) resp := linodego.APIError{ Errors: []linodego.APIErrorReason{ {Reason: "Not Found"}, @@ -665,7 +769,7 @@ func (f *fakeAPI) setupRoutes() { return } - w.WriteHeader(404) + w.WriteHeader(http.StatusNotFound) resp := linodego.APIError{ Errors: []linodego.APIErrorReason{ {Reason: "Not Found"}, diff --git a/cloud/linode/firewall/firewalls.go b/cloud/linode/firewall/firewalls.go index e818dbfe..1e700984 100644 --- a/cloud/linode/firewall/firewalls.go +++ 
b/cloud/linode/firewall/firewalls.go @@ -8,9 +8,8 @@ import ( "strconv" "strings" - "golang.org/x/exp/slices" - "github.com/linode/linodego" + "golang.org/x/exp/slices" v1 "k8s.io/api/core/v1" "k8s.io/klog/v2" @@ -20,8 +19,11 @@ import ( const ( maxFirewallRuleLabelLen = 32 + maxFirewallRuleDescLen = 100 maxIPsPerFirewall = 255 maxRulesPerFirewall = 25 + accept = "ACCEPT" + drop = "DROP" ) var ( @@ -134,7 +136,7 @@ func ruleChanged(old linodego.FirewallRuleSet, newACL aclConfig) bool { var ips *linodego.NetworkAddresses if newACL.AllowList != nil { // this is a allowList, this means that the rules should have `DROP` as inboundpolicy - if old.InboundPolicy != "DROP" { + if old.InboundPolicy != drop { return true } if (newACL.AllowList.IPv4 != nil || newACL.AllowList.IPv6 != nil) && len(old.Inbound) == 0 { @@ -144,7 +146,7 @@ func ruleChanged(old linodego.FirewallRuleSet, newACL aclConfig) bool { } if newACL.DenyList != nil { - if old.InboundPolicy != "ACCEPT" { + if old.InboundPolicy != accept { return true } @@ -183,6 +185,16 @@ func chunkIPs(ips []string) [][]string { return chunks } +// truncateFWRuleDesc truncates the description to maxFirewallRuleDescLen if it exceeds the limit. +func truncateFWRuleDesc(desc string) string { + if len(desc) > maxFirewallRuleDescLen { + newDesc := desc[0:maxFirewallRuleDescLen-3] + "..." + klog.Infof("Firewall rule description '%s' is too long. Stripping it to '%s'", desc, newDesc) + desc = newDesc + } + return desc +} + // processACL takes the IPs, aclType, label etc and formats them into the passed linodego.FirewallCreateOptions pointer. func processACL(fwcreateOpts *linodego.FirewallCreateOptions, aclType, label, svcName, ports string, ips linodego.NetworkAddresses) error { ruleLabel := fmt.Sprintf("%s-%s", aclType, svcName) @@ -205,10 +217,11 @@ func processACL(fwcreateOpts *linodego.FirewallCreateOptions, aclType, label, sv ipv4chunks := chunkIPs(ipv4s) for i, chunk := range ipv4chunks { v4chunk := chunk + desc := fmt.Sprintf("Rule %d, Created by linode-ccm: %s, for %s", i, label, svcName) fwcreateOpts.Rules.Inbound = append(fwcreateOpts.Rules.Inbound, linodego.FirewallRule{ Action: aclType, Label: ruleLabel, - Description: fmt.Sprintf("Rule %d, Created by linode-ccm: %s, for %s", i, label, svcName), + Description: truncateFWRuleDesc(desc), Protocol: linodego.TCP, // Nodebalancers support only TCP. Ports: ports, Addresses: linodego.NetworkAddresses{IPv4: &v4chunk}, @@ -218,33 +231,35 @@ func processACL(fwcreateOpts *linodego.FirewallCreateOptions, aclType, label, sv ipv6chunks := chunkIPs(ipv6s) for i, chunk := range ipv6chunks { v6chunk := chunk + desc := fmt.Sprintf("Rule %d, Created by linode-ccm: %s, for %s", i, label, svcName) fwcreateOpts.Rules.Inbound = append(fwcreateOpts.Rules.Inbound, linodego.FirewallRule{ Action: aclType, Label: ruleLabel, - Description: fmt.Sprintf("Rule %d, Created by linode-ccm: %s, for %s", i, label, svcName), + Description: truncateFWRuleDesc(desc), Protocol: linodego.TCP, // Nodebalancers support only TCP. Ports: ports, Addresses: linodego.NetworkAddresses{IPv6: &v6chunk}, }) } } else { + desc := fmt.Sprintf("Created by linode-ccm: %s, for %s", label, svcName) fwcreateOpts.Rules.Inbound = append(fwcreateOpts.Rules.Inbound, linodego.FirewallRule{ Action: aclType, Label: ruleLabel, - Description: fmt.Sprintf("Created by linode-ccm: %s, for %s", label, svcName), + Description: truncateFWRuleDesc(desc), Protocol: linodego.TCP, // Nodebalancers support only TCP. 
Ports: ports, Addresses: ips, }) } - fwcreateOpts.Rules.OutboundPolicy = "ACCEPT" - if aclType == "ACCEPT" { + fwcreateOpts.Rules.OutboundPolicy = accept + if aclType == accept { // if an allowlist is present, we drop everything else. - fwcreateOpts.Rules.InboundPolicy = "DROP" + fwcreateOpts.Rules.InboundPolicy = drop } else { // if a denylist is present, we accept everything else. - fwcreateOpts.Rules.InboundPolicy = "ACCEPT" + fwcreateOpts.Rules.InboundPolicy = accept } if len(fwcreateOpts.Rules.Inbound) > maxRulesPerFirewall { @@ -453,7 +468,7 @@ func (l *LinodeClient) updateNodeBalancerFirewallWithACL( return nil } - fwCreateOpts, err := CreateFirewallOptsForSvc(service.Name, []string{""}, service) + fwCreateOpts, err := CreateFirewallOptsForSvc(firewalls[0].Label, []string{""}, service) if err != nil { return err } @@ -480,7 +495,7 @@ func CreateFirewallOptsForSvc(label string, tags []string, svc *v1.Service) (*li servicePorts = append(servicePorts, strconv.Itoa(int(port.Port))) } - portsString := strings.Join(servicePorts[:], ",") + portsString := strings.Join(servicePorts, ",") var acl aclConfig if err := json.Unmarshal([]byte(aclString), &acl); err != nil { return nil, err @@ -490,10 +505,10 @@ func CreateFirewallOptsForSvc(label string, tags []string, svc *v1.Service) (*li return nil, ErrInvalidFWConfig } - aclType := "ACCEPT" + aclType := accept allowedIPs := acl.AllowList if acl.DenyList != nil { - aclType = "DROP" + aclType = drop allowedIPs = acl.DenyList } diff --git a/cloud/linode/health_check.go b/cloud/linode/health_check.go index dc0d0e30..458a90a9 100644 --- a/cloud/linode/health_check.go +++ b/cloud/linode/health_check.go @@ -4,9 +4,10 @@ import ( "context" "time" - "github.com/linode/linode-cloud-controller-manager/cloud/linode/client" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/klog/v2" + + "github.com/linode/linode-cloud-controller-manager/cloud/linode/client" ) type healthChecker struct { diff --git a/cloud/linode/health_check_test.go b/cloud/linode/health_check_test.go index bd1570d0..b7e71972 100644 --- a/cloud/linode/health_check_test.go +++ b/cloud/linode/health_check_test.go @@ -5,8 +5,9 @@ import ( "time" "github.com/golang/mock/gomock" - "github.com/linode/linode-cloud-controller-manager/cloud/linode/client/mocks" "github.com/linode/linodego" + + "github.com/linode/linode-cloud-controller-manager/cloud/linode/client/mocks" ) func TestHealthCheck(t *testing.T) { @@ -40,6 +41,8 @@ func TestHealthCheck(t *testing.T) { } func testSucceedingCallsToLinodeAPIHappenStopSignalNotFired(t *testing.T, client *mocks.MockClient) { + t.Helper() + writableStopCh := make(chan struct{}) readableStopCh := make(chan struct{}) @@ -61,6 +64,8 @@ func testSucceedingCallsToLinodeAPIHappenStopSignalNotFired(t *testing.T, client } func testFailingCallsToLinodeAPIHappenStopSignalFired(t *testing.T, client *mocks.MockClient) { + t.Helper() + writableStopCh := make(chan struct{}) readableStopCh := make(chan struct{}) @@ -94,6 +99,8 @@ func testFailingCallsToLinodeAPIHappenStopSignalFired(t *testing.T, client *mock } func testErrorCallsToLinodeAPIHappenStopSignalNotFired(t *testing.T, client *mocks.MockClient) { + t.Helper() + writableStopCh := make(chan struct{}) readableStopCh := make(chan struct{}) diff --git a/cloud/linode/instances.go b/cloud/linode/instances.go index c93a9fa7..0cc5940b 100644 --- a/cloud/linode/instances.go +++ b/cloud/linode/instances.go @@ -2,6 +2,7 @@ package linode import ( "context" + "errors" "fmt" "os" "slices" @@ -99,14 +100,13 @@ func (nc *nodeCache) 
refreshInstances(ctx context.Context, client client.Client) } newNodes := make(map[int]linodeInstance, len(instances)) - for i, instance := range instances { - + for index, instance := range instances { // if running within VPC, only store instances in cache which are part of VPC if Options.VPCNames != "" && len(vpcNodes[instance.ID]) == 0 { continue } node := linodeInstance{ - instance: &instances[i], + instance: &instances[index], ips: nc.getInstanceAddresses(instance, vpcNodes[instance.ID]), } newNodes[instance.ID] = node @@ -126,7 +126,7 @@ type instances struct { func newInstances(client client.Client) *instances { timeout := 15 if raw, ok := os.LookupEnv("LINODE_INSTANCE_CACHE_TTL"); ok { - if t, _ := strconv.Atoi(raw); t > 0 { + if t, err := strconv.Atoi(raw); t > 0 && err == nil { timeout = t } } @@ -236,7 +236,7 @@ func (i *instances) lookupLinode(ctx context.Context, node *v1.Node) (*linodego. func (i *instances) InstanceExists(ctx context.Context, node *v1.Node) (bool, error) { ctx = sentry.SetHubOnContext(ctx) if _, err := i.lookupLinode(ctx, node); err != nil { - if err == cloudprovider.InstanceNotFound { + if errors.Is(err, cloudprovider.InstanceNotFound) { return false, nil } sentry.CaptureError(ctx, err) diff --git a/cloud/linode/instances_test.go b/cloud/linode/instances_test.go index 6cd6e183..e1574a7e 100644 --- a/cloud/linode/instances_test.go +++ b/cloud/linode/instances_test.go @@ -1,7 +1,6 @@ package linode import ( - "context" "fmt" "net" "slices" @@ -10,12 +9,20 @@ import ( "testing" "github.com/golang/mock/gomock" - "github.com/linode/linode-cloud-controller-manager/cloud/linode/client/mocks" "github.com/linode/linodego" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" cloudprovider "k8s.io/cloud-provider" + + "github.com/linode/linode-cloud-controller-manager/cloud/linode/client/mocks" +) + +const ( + instanceName string = "mock-instance" + usEast string = "us-east" + typeG6 string = "g6-standard-1" ) func nodeWithProviderID(providerID string) *v1.Node { @@ -29,7 +36,7 @@ func nodeWithName(name string) *v1.Node { } func TestInstanceExists(t *testing.T) { - ctx := context.TODO() + ctx := t.Context() ctrl := gomock.NewController(t) defer ctrl.Finish() @@ -41,7 +48,7 @@ func TestInstanceExists(t *testing.T) { client.EXPECT().ListInstances(gomock.Any(), nil).Times(1).Return([]linodego.Instance{}, nil) exists, err := instances.InstanceExists(ctx, node) - assert.NoError(t, err) + require.NoError(t, err) assert.False(t, exists) }) @@ -52,13 +59,13 @@ func TestInstanceExists(t *testing.T) { { ID: 123, Label: "mock", - Region: "us-east", + Region: usEast, Type: "g6-standard-2", }, }, nil) exists, err := instances.InstanceExists(ctx, node) - assert.NoError(t, err) + require.NoError(t, err) assert.True(t, exists) }) @@ -72,13 +79,13 @@ func TestInstanceExists(t *testing.T) { }, nil) exists, err := instances.InstanceExists(ctx, node) - assert.NoError(t, err) + require.NoError(t, err) assert.True(t, exists) }) } func TestMetadataRetrieval(t *testing.T) { - ctx := context.TODO() + ctx := t.Context() ctrl := gomock.NewController(t) defer ctrl.Finish() @@ -94,7 +101,7 @@ func TestMetadataRetrieval(t *testing.T) { node := nodeWithName(name) meta, err := instances.InstanceMetadata(ctx, node) - assert.Nil(t, err) + require.NoError(t, err) assert.Equal(t, providerIDPrefix+strconv.Itoa(expectedInstance.ID), meta.ProviderID) }) @@ -106,32 +113,31 @@ func TestMetadataRetrieval(t 
*testing.T) { client.EXPECT().ListInstances(gomock.Any(), nil).Times(1).Return([]linodego.Instance{}, nil) meta, err := instances.InstanceMetadata(ctx, node) - assert.ErrorIs(t, err, cloudprovider.InstanceNotFound) + require.ErrorIs(t, err, cloudprovider.InstanceNotFound) assert.Nil(t, meta) }) t.Run("should return data when linode is found (by name)", func(t *testing.T) { instances := newInstances(client) id := 123 - name := "mock-instance" - node := nodeWithName(name) + node := nodeWithName(instanceName) publicIPv4 := net.ParseIP("45.76.101.25") privateIPv4 := net.ParseIP("192.168.133.65") - linodeType := "g6-standard-1" - region := "us-east" + linodeType := typeG6 + region := usEast client.EXPECT().ListInstances(gomock.Any(), nil).Times(1).Return([]linodego.Instance{ - {ID: id, Label: name, Type: linodeType, Region: region, IPv4: []*net.IP{&publicIPv4, &privateIPv4}}, + {ID: id, Label: instanceName, Type: linodeType, Region: region, IPv4: []*net.IP{&publicIPv4, &privateIPv4}}, }, nil) meta, err := instances.InstanceMetadata(ctx, node) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, providerIDPrefix+strconv.Itoa(id), meta.ProviderID) assert.Equal(t, region, meta.Region) assert.Equal(t, linodeType, meta.InstanceType) assert.Equal(t, []v1.NodeAddress{ { Type: v1.NodeHostName, - Address: name, + Address: instanceName, }, { Type: v1.NodeExternalIP, @@ -147,13 +153,11 @@ func TestMetadataRetrieval(t *testing.T) { t.Run("should return data when linode is found (by name) and addresses must be in order", func(t *testing.T) { instances := newInstances(client) id := 123 - name := "mock-instance" - node := nodeWithName(name) + node := nodeWithName(instanceName) publicIPv4 := net.ParseIP("45.76.101.25") privateIPv4 := net.ParseIP("192.168.133.65") ipv6Addr := "2001::8a2e:370:7348" - linodeType := "g6-standard-1" - region := "us-east" + linodeType := typeG6 Options.VPCNames = "test" vpcIDs["test"] = 1 @@ -161,9 +165,9 @@ func TestMetadataRetrieval(t *testing.T) { instance := linodego.Instance{ ID: id, - Label: name, + Label: instanceName, Type: linodeType, - Region: region, + Region: usEast, IPv4: []*net.IP{&publicIPv4, &privateIPv4}, IPv6: ipv6Addr, } @@ -198,14 +202,14 @@ func TestMetadataRetrieval(t *testing.T) { client.EXPECT().ListVPCIPAddresses(gomock.Any(), vpcIDs["test"], gomock.Any()).Return(routesInVPC, nil) meta, err := instances.InstanceMetadata(ctx, node) - assert.NoError(t, err) + require.NoError(t, err) assert.Equal(t, providerIDPrefix+strconv.Itoa(id), meta.ProviderID) - assert.Equal(t, region, meta.Region) + assert.Equal(t, usEast, meta.Region) assert.Equal(t, linodeType, meta.InstanceType) assert.Equal(t, []v1.NodeAddress{ { Type: v1.NodeHostName, - Address: name, + Address: instanceName, }, { Type: v1.NodeInternalIP, @@ -344,15 +348,15 @@ func TestMetadataRetrieval(t *testing.T) { ips = append(ips, &parsed) } - linodeType := "g6-standard-1" - region := "us-east" + linodeType := typeG6 + region := usEast client.EXPECT().ListInstances(gomock.Any(), nil).Times(1).Return([]linodego.Instance{ {ID: id, Label: name, Type: linodeType, Region: region, IPv4: ips, IPv6: test.inputIPv6}, }, nil) meta, err := instances.InstanceMetadata(ctx, node) - assert.Equal(t, err, test.expectedErr) + assert.Equal(t, test.expectedErr, err) if test.expectedErr == nil { assert.Equal(t, region, meta.Region) assert.Equal(t, linodeType, meta.InstanceType) @@ -420,9 +424,9 @@ func TestMetadataRetrieval(t *testing.T) { meta, err := instances.InstanceMetadata(ctx, &node) if test.expectedErr != 
nil { assert.Nil(t, meta) - assert.Equal(t, err, test.expectedErr) + assert.Equal(t, test.expectedErr, err) } else { - assert.Nil(t, err) + require.NoError(t, err) assert.Equal(t, providerIDPrefix+strconv.Itoa(expectedInstance.ID), meta.ProviderID) } }) @@ -431,7 +435,7 @@ func TestMetadataRetrieval(t *testing.T) { } func TestMalformedProviders(t *testing.T) { - ctx := context.TODO() + ctx := t.Context() ctrl := gomock.NewController(t) defer ctrl.Finish() @@ -444,13 +448,13 @@ func TestMalformedProviders(t *testing.T) { client.EXPECT().ListInstances(gomock.Any(), nil).Times(1).Return([]linodego.Instance{}, nil) meta, err := instances.InstanceMetadata(ctx, node) - assert.ErrorIs(t, err, invalidProviderIDError{providerID}) + require.ErrorIs(t, err, invalidProviderIDError{providerID}) assert.Nil(t, meta) }) } func TestInstanceShutdown(t *testing.T) { - ctx := context.TODO() + ctx := t.Context() ctrl := gomock.NewController(t) defer ctrl.Finish() @@ -463,7 +467,7 @@ func TestInstanceShutdown(t *testing.T) { client.EXPECT().ListInstances(gomock.Any(), nil).Times(1).Return([]linodego.Instance{}, nil) shutdown, err := instances.InstanceShutdown(ctx, node) - assert.Error(t, err) + require.Error(t, err) assert.False(t, shutdown) }) @@ -474,7 +478,7 @@ func TestInstanceShutdown(t *testing.T) { client.EXPECT().ListInstances(gomock.Any(), nil).Times(1).Return([]linodego.Instance{}, nil) shutdown, err := instances.InstanceShutdown(ctx, node) - assert.Error(t, err) + require.Error(t, err) assert.False(t, shutdown) }) @@ -487,7 +491,7 @@ func TestInstanceShutdown(t *testing.T) { }, nil) shutdown, err := instances.InstanceShutdown(ctx, node) - assert.NoError(t, err) + require.NoError(t, err) assert.True(t, shutdown) }) @@ -500,7 +504,7 @@ func TestInstanceShutdown(t *testing.T) { }, nil) shutdown, err := instances.InstanceShutdown(ctx, node) - assert.NoError(t, err) + require.NoError(t, err) assert.True(t, shutdown) }) @@ -513,7 +517,7 @@ func TestInstanceShutdown(t *testing.T) { }, nil) shutdown, err := instances.InstanceShutdown(ctx, node) - assert.NoError(t, err) + require.NoError(t, err) assert.False(t, shutdown) }) } diff --git a/cloud/linode/loadbalancers.go b/cloud/linode/loadbalancers.go index cccacde2..b57ae90d 100644 --- a/cloud/linode/loadbalancers.go +++ b/cloud/linode/loadbalancers.go @@ -5,6 +5,7 @@ import ( "encoding/json" "errors" "fmt" + "net" "net/http" "os" "reflect" @@ -12,6 +13,7 @@ import ( "strings" "time" + "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2alpha1" ciliumclient "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1" "github.com/linode/linodego" v1 "k8s.io/api/core/v1" @@ -29,7 +31,10 @@ import ( "github.com/linode/linode-cloud-controller-manager/sentry" ) -var errNoNodesAvailable = errors.New("no nodes available for nodebalancer") +var ( + errNoNodesAvailable = errors.New("no nodes available for nodebalancer") + maxConnThrottleStringLen int = 20 +) type lbNotFoundError struct { serviceNn string @@ -121,6 +126,7 @@ func (l *loadbalancers) cleanupOldNodeBalancer(ctx context.Context, service *v1. 
} previousNB, err := l.getNodeBalancerByStatus(ctx, service) + //nolint: errorlint //conversion to errors.Is() may break chainsaw tests switch err.(type) { case nil: // continue execution @@ -172,6 +178,7 @@ func (l *loadbalancers) GetLoadBalancer(ctx context.Context, clusterName string, } nb, err := l.getNodeBalancerForService(ctx, service) + //nolint: errorlint //conversion to errors.Is() may break chainsaw tests switch err.(type) { case nil: break @@ -207,7 +214,8 @@ func (l *loadbalancers) EnsureLoadBalancer(ctx context.Context, clusterName stri } // check for existing CiliumLoadBalancerIPPool for service - pool, err := l.getCiliumLBIPPool(ctx, service) + var pool *v2alpha1.CiliumLoadBalancerIPPool + pool, err = l.getCiliumLBIPPool(ctx, service) if err != nil && !k8serrors.IsNotFound(err) { klog.Infof("Failed to get CiliumLoadBalancerIPPool: %s", err.Error()) return nil, err @@ -249,6 +257,7 @@ func (l *loadbalancers) EnsureLoadBalancer(ctx context.Context, clusterName stri var nb *linodego.NodeBalancer nb, err = l.getNodeBalancerForService(ctx, service) + //nolint: errorlint //conversion to errors.Is() may break chainsaw tests switch err.(type) { case lbNotFoundError: if service.GetAnnotations()[annotations.AnnLinodeNodeBalancerID] != "" { @@ -288,7 +297,6 @@ func (l *loadbalancers) EnsureLoadBalancer(ctx context.Context, clusterName stri return lbStatus, nil } -//nolint:funlen func (l *loadbalancers) updateNodeBalancer( ctx context.Context, clusterName string, @@ -368,7 +376,8 @@ func (l *loadbalancers) updateNodeBalancer( oldNBNodeIDs := make(map[string]int) if currentNBCfg != nil { // Obtain list of current NB nodes and convert it to map of node IDs - currentNBNodes, err := l.client.ListNodeBalancerNodes(ctx, nb.ID, currentNBCfg.ID, nil) + var currentNBNodes []linodego.NodeBalancerNode + currentNBNodes, err = l.client.ListNodeBalancerNodes(ctx, nb.ID, currentNBCfg.ID, nil) if err != nil { // This error can be ignored, because if we fail to get nodes we can anyway rebuild the config from scratch, // it would just cause the NB to reload config even if the node list did not change, so we prefer to send IDs when it is posible. 
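The next hunk teaches updateNodeBalancer to honor the NodeBalancerBackendIPv4Range annotation: the range is validated, the target subnet is resolved via getSubnetIDForSVC, and the resulting subnet ID is threaded into each rebuilt backend node. As orientation only — this is not code added by the diff — a minimal sketch of a Service that opts into this VPC-backed path could look like the following; the service name and the CIDR/VPC/subnet values are illustrative assumptions borrowed from the new tests further down.

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"github.com/linode/linode-cloud-controller-manager/cloud/annotations"
)

// vpcBackedService builds a Service annotated so the CCM provisions its
// NodeBalancer with backends inside a VPC subnet instead of over the
// nodes' private IPs. All literal values here are example assumptions.
func vpcBackedService() *v1.Service {
	return &v1.Service{
		ObjectMeta: metav1.ObjectMeta{
			Name: "my-lb",
			Annotations: map[string]string{
				// /30 range reserved for the NodeBalancer backends; when
				// Options.NodeBalancerBackendIPv4Subnet is configured, this
				// range must fall inside it (validateNodeBalancerBackendIPv4Range).
				annotations.NodeBalancerBackendIPv4Range: "10.100.0.0/30",
				// Optional overrides; by default the first entry of
				// Options.VPCNames / Options.SubnetNames is used.
				annotations.NodeBalancerBackendVPCName:    "test1",
				annotations.NodeBalancerBackendSubnetName: "default",
			},
		},
		Spec: v1.ServiceSpec{
			Type: v1.ServiceTypeLoadBalancer,
			Ports: []v1.ServicePort{
				{Protocol: v1.ProtocolTCP, Port: 80, NodePort: 30000},
			},
		},
	}
}

func main() {
	fmt.Printf("%+v\n", vpcBackedService().Annotations)
}

When the annotation is present, getNodePrivateIP skips the AnnLinodeNodePrivateIP override and the backend address comes from the node's VPC interface, with SubnetID set on each NodeBalancer node create/rebuild option.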
@@ -383,8 +392,23 @@ func (l *loadbalancers) updateNodeBalancer( } // Add all of the Nodes to the config newNBNodes := make([]linodego.NodeBalancerConfigRebuildNodeOptions, 0, len(nodes)) + subnetID := 0 + backendIPv4Range, ok := service.GetAnnotations()[annotations.NodeBalancerBackendIPv4Range] + if ok { + if err = validateNodeBalancerBackendIPv4Range(backendIPv4Range); err != nil { + return err + } + + var id int + id, err = l.getSubnetIDForSVC(ctx, service) + if err != nil { + sentry.CaptureError(ctx, err) + return fmt.Errorf("Error getting subnet ID for service %s: %w", service.Name, err) + } + subnetID = id + } for _, node := range nodes { - newNodeOpts := l.buildNodeBalancerNodeConfigRebuildOptions(node, port.NodePort) + newNodeOpts := l.buildNodeBalancerNodeConfigRebuildOptions(node, port.NodePort, subnetID) oldNodeID, ok := oldNBNodeIDs[newNodeOpts.Address] if ok { newNodeOpts.ID = oldNodeID @@ -402,7 +426,7 @@ func (l *loadbalancers) updateNodeBalancer( currentNBCfg, err = l.client.CreateNodeBalancerConfig(ctx, nb.ID, createOpts) if err != nil { sentry.CaptureError(ctx, err) - return fmt.Errorf("[port %d] error creating NodeBalancer config: %v", int(port.Port), err) + return fmt.Errorf("[port %d] error creating NodeBalancer config: %w", int(port.Port), err) } rebuildOpts = currentNBCfg.GetRebuildOptions() @@ -418,7 +442,7 @@ func (l *loadbalancers) updateNodeBalancer( if _, err = l.client.RebuildNodeBalancerConfig(ctx, nb.ID, currentNBCfg.ID, rebuildOpts); err != nil { sentry.CaptureError(ctx, err) - return fmt.Errorf("[port %d] error rebuilding NodeBalancer config: %v", int(port.Port), err) + return fmt.Errorf("[port %d] error rebuilding NodeBalancer config: %w", int(port.Port), err) } } @@ -443,7 +467,7 @@ func (l *loadbalancers) UpdateLoadBalancer(ctx context.Context, clusterName stri // make sure that IPs are shared properly on the Node if using load-balancers not backed by NodeBalancers for _, node := range nodes { - if err := l.handleIPSharing(ctx, node, ipHolderSuffix); err != nil { + if err = l.handleIPSharing(ctx, node, ipHolderSuffix); err != nil { return err } } @@ -455,7 +479,7 @@ func (l *loadbalancers) UpdateLoadBalancer(ctx context.Context, clusterName stri serviceWithStatus := service.DeepCopy() serviceWithStatus.Status.LoadBalancer, err = l.getLatestServiceLoadBalancerStatus(ctx, service) if err != nil { - return fmt.Errorf("failed to get latest LoadBalancer status for service (%s): %s", getServiceNn(service), err) + return fmt.Errorf("failed to get latest LoadBalancer status for service (%s): %w", getServiceNn(service), err) } nb, err := l.getNodeBalancerForService(ctx, serviceWithStatus) @@ -534,6 +558,7 @@ func (l *loadbalancers) EnsureLoadBalancerDeleted(ctx context.Context, clusterNa } nb, err := l.getNodeBalancerForService(ctx, service) + //nolint: errorlint //conversion to errors.Is() may break chainsaw tests switch getErr := err.(type) { case nil: break @@ -603,6 +628,7 @@ func (l *loadbalancers) getNodeBalancerByIPv4(ctx context.Context, service *v1.S func (l *loadbalancers) getNodeBalancerByID(ctx context.Context, service *v1.Service, id int) (*linodego.NodeBalancer, error) { nb, err := l.client.GetNodeBalancer(ctx, id) if err != nil { + //nolint: errorlint //need type assertion for code field to work if apiErr, ok := err.(*linodego.Error); ok && apiErr.Code == http.StatusNotFound { return nil, lbNotFoundError{serviceNn: getServiceNn(service), nodeBalancerID: id} } @@ -627,17 +653,46 @@ func (l *loadbalancers) GetLoadBalancerTags(_ context.Context, 
clusterName strin return tags } +// GetLinodeNBType returns the NodeBalancer type for the service. +func (l *loadbalancers) GetLinodeNBType(service *v1.Service) linodego.NodeBalancerPlanType { + typeStr, ok := service.GetAnnotations()[annotations.AnnLinodeNodeBalancerType] + if ok && linodego.NodeBalancerPlanType(typeStr) == linodego.NBTypePremium { + return linodego.NBTypePremium + } + + return linodego.NodeBalancerPlanType(Options.DefaultNBType) +} + func (l *loadbalancers) createNodeBalancer(ctx context.Context, clusterName string, service *v1.Service, configs []*linodego.NodeBalancerConfigCreateOptions) (lb *linodego.NodeBalancer, err error) { connThrottle := getConnectionThrottle(service) label := l.GetLoadBalancerName(ctx, clusterName, service) tags := l.GetLoadBalancerTags(ctx, clusterName, service) + nbType := l.GetLinodeNBType(service) createOpts := linodego.NodeBalancerCreateOptions{ Label: &label, Region: l.zone, ClientConnThrottle: &connThrottle, Configs: configs, Tags: tags, + Type: nbType, + } + + backendIPv4Range, ok := service.GetAnnotations()[annotations.NodeBalancerBackendIPv4Range] + if ok { + if err := validateNodeBalancerBackendIPv4Range(backendIPv4Range); err != nil { + return nil, err + } + subnetID, err := l.getSubnetIDForSVC(ctx, service) + if err != nil { + return nil, err + } + createOpts.VPCs = []linodego.NodeBalancerVPCOptions{ + { + SubnetID: subnetID, + IPv4Range: backendIPv4Range, + }, + } } fwid, ok := service.GetAnnotations()[annotations.AnnLinodeCloudFirewallID] @@ -668,9 +723,8 @@ func (l *loadbalancers) createNodeBalancer(ctx context.Context, clusterName stri return l.client.CreateNodeBalancer(ctx, createOpts) } -//nolint:funlen func (l *loadbalancers) buildNodeBalancerConfig(ctx context.Context, service *v1.Service, port int) (linodego.NodeBalancerConfig, error) { - portConfig, err := getPortConfig(service, port) + portConfigResult, err := getPortConfig(service, port) if err != nil { return linodego.NodeBalancerConfig{}, err } @@ -682,8 +736,8 @@ func (l *loadbalancers) buildNodeBalancerConfig(ctx context.Context, service *v1 config := linodego.NodeBalancerConfig{ Port: port, - Protocol: portConfig.Protocol, - ProxyProtocol: portConfig.ProxyProtocol, + Protocol: portConfigResult.Protocol, + ProxyProtocol: portConfigResult.ProxyProtocol, Check: health, } @@ -734,8 +788,8 @@ func (l *loadbalancers) buildNodeBalancerConfig(ctx context.Context, service *v1 } config.CheckPassive = checkPassive - if portConfig.Protocol == linodego.ProtocolHTTPS { - if err = l.addTLSCert(ctx, service, &config, portConfig); err != nil { + if portConfigResult.Protocol == linodego.ProtocolHTTPS { + if err = l.addTLSCert(ctx, service, &config, portConfigResult); err != nil { return config, err } } @@ -756,6 +810,28 @@ func (l *loadbalancers) addTLSCert(ctx context.Context, service *v1.Service, nbC return nil } +// getSubnetIDForSVC returns the subnet ID for the service's VPC and subnet. +// By default, first VPCName and SubnetName are used to calculate subnet id for the service. +// If the service has annotations specifying VPCName and SubnetName, they are used instead. 
+func (l *loadbalancers) getSubnetIDForSVC(ctx context.Context, service *v1.Service) (int, error) { + if Options.VPCNames == "" { + return 0, fmt.Errorf("CCM not configured with VPC, cannot create NodeBalancer with specified annotation") + } + vpcName := strings.Split(Options.VPCNames, ",")[0] + if specifiedVPCName, ok := service.GetAnnotations()[annotations.NodeBalancerBackendVPCName]; ok { + vpcName = specifiedVPCName + } + vpcID, err := GetVPCID(ctx, l.client, vpcName) + if err != nil { + return 0, err + } + subnetName := strings.Split(Options.SubnetNames, ",")[0] + if specifiedSubnetName, ok := service.GetAnnotations()[annotations.NodeBalancerBackendSubnetName]; ok { + subnetName = specifiedSubnetName + } + return GetSubnetID(ctx, l.client, vpcID, subnetName) +} + // buildLoadBalancerRequest returns a linodego.NodeBalancer // requests for service across nodes. func (l *loadbalancers) buildLoadBalancerRequest(ctx context.Context, clusterName string, service *v1.Service, nodes []*v1.Node) (*linodego.NodeBalancer, error) { @@ -765,6 +841,19 @@ func (l *loadbalancers) buildLoadBalancerRequest(ctx context.Context, clusterNam ports := service.Spec.Ports configs := make([]*linodego.NodeBalancerConfigCreateOptions, 0, len(ports)) + subnetID := 0 + backendIPv4Range, ok := service.GetAnnotations()[annotations.NodeBalancerBackendIPv4Range] + if ok { + if err := validateNodeBalancerBackendIPv4Range(backendIPv4Range); err != nil { + return nil, err + } + id, err := l.getSubnetIDForSVC(ctx, service) + if err != nil { + return nil, err + } + subnetID = id + } + for _, port := range ports { if port.Protocol == v1.ProtocolUDP { return nil, fmt.Errorf("error creating NodeBalancer Config: ports with the UDP protocol are not supported") @@ -777,7 +866,7 @@ func (l *loadbalancers) buildLoadBalancerRequest(ctx context.Context, clusterNam createOpt := config.GetCreateOptions() for _, n := range nodes { - createOpt.Nodes = append(createOpt.Nodes, l.buildNodeBalancerNodeConfigRebuildOptions(n, port.NodePort).NodeBalancerNodeCreateOptions) + createOpt.Nodes = append(createOpt.Nodes, l.buildNodeBalancerNodeConfigRebuildOptions(n, port.NodePort, subnetID).NodeBalancerNodeCreateOptions) } configs = append(configs, &createOpt) @@ -785,22 +874,22 @@ func (l *loadbalancers) buildLoadBalancerRequest(ctx context.Context, clusterNam return l.createNodeBalancer(ctx, clusterName, service, configs) } -func coerceString(s string, minLen, maxLen int, padding string) string { +func coerceString(str string, minLen, maxLen int, padding string) string { if len(padding) == 0 { padding = "x" } - if len(s) > maxLen { - return s[:maxLen] - } else if len(s) < minLen { - return coerceString(fmt.Sprintf("%s%s", padding, s), minLen, maxLen, padding) + if len(str) > maxLen { + return str[:maxLen] + } else if len(str) < minLen { + return coerceString(fmt.Sprintf("%s%s", padding, str), minLen, maxLen, padding) } - return s + return str } -func (l *loadbalancers) buildNodeBalancerNodeConfigRebuildOptions(node *v1.Node, nodePort int32) linodego.NodeBalancerConfigRebuildNodeOptions { - return linodego.NodeBalancerConfigRebuildNodeOptions{ +func (l *loadbalancers) buildNodeBalancerNodeConfigRebuildOptions(node *v1.Node, nodePort int32, subnetID int) linodego.NodeBalancerConfigRebuildNodeOptions { + nodeOptions := linodego.NodeBalancerConfigRebuildNodeOptions{ NodeBalancerNodeCreateOptions: linodego.NodeBalancerNodeCreateOptions{ - Address: fmt.Sprintf("%v:%v", getNodePrivateIP(node), nodePort), + Address: fmt.Sprintf("%v:%v", 
getNodePrivateIP(node, subnetID), nodePort), // NodeBalancer backends must be 3-32 chars in length // If < 3 chars, pad node name with "node-" prefix Label: coerceString(node.Name, 3, 32, "node-"), @@ -808,6 +897,10 @@ func (l *loadbalancers) buildNodeBalancerNodeConfigRebuildOptions(node *v1.Node, Weight: 100, }, } + if subnetID != 0 { + nodeOptions.NodeBalancerNodeCreateOptions.SubnetID = subnetID + } + return nodeOptions } func (l *loadbalancers) retrieveKubeClient() error { @@ -842,12 +935,12 @@ func (l *loadbalancers) retrieveKubeClient() error { } func getPortConfig(service *v1.Service, port int) (portConfig, error) { - portConfig := portConfig{} - portConfigAnnotation, err := getPortConfigAnnotation(service, port) + portConfigResult := portConfig{} + portConfigAnnotationResult, err := getPortConfigAnnotation(service, port) if err != nil { - return portConfig, err + return portConfigResult, err } - protocol := portConfigAnnotation.Protocol + protocol := portConfigAnnotationResult.Protocol if protocol == "" { protocol = "tcp" if p, ok := service.GetAnnotations()[annotations.AnnLinodeDefaultProtocol]; ok { @@ -856,7 +949,7 @@ func getPortConfig(service *v1.Service, port int) (portConfig, error) { } protocol = strings.ToLower(protocol) - proxyProtocol := portConfigAnnotation.ProxyProtocol + proxyProtocol := portConfigAnnotationResult.ProxyProtocol if proxyProtocol == "" { proxyProtocol = string(linodego.ProxyProtocolNone) for _, ann := range []string{annotations.AnnLinodeDefaultProxyProtocol, annLinodeProxyProtocolDeprecated} { @@ -868,22 +961,22 @@ func getPortConfig(service *v1.Service, port int) (portConfig, error) { } if protocol != "tcp" && protocol != "http" && protocol != "https" { - return portConfig, fmt.Errorf("invalid protocol: %q specified", protocol) + return portConfigResult, fmt.Errorf("invalid protocol: %q specified", protocol) } switch proxyProtocol { case string(linodego.ProxyProtocolNone), string(linodego.ProxyProtocolV1), string(linodego.ProxyProtocolV2): break default: - return portConfig, fmt.Errorf("invalid NodeBalancer proxy protocol value '%s'", proxyProtocol) + return portConfigResult, fmt.Errorf("invalid NodeBalancer proxy protocol value '%s'", proxyProtocol) } - portConfig.Port = port - portConfig.Protocol = linodego.ConfigProtocol(protocol) - portConfig.ProxyProtocol = linodego.ConfigProxyProtocol(proxyProtocol) - portConfig.TLSSecretName = portConfigAnnotation.TLSSecretName + portConfigResult.Port = port + portConfigResult.Protocol = linodego.ConfigProtocol(protocol) + portConfigResult.ProxyProtocol = linodego.ConfigProxyProtocol(proxyProtocol) + portConfigResult.TLSSecretName = portConfigAnnotationResult.TLSSecretName - return portConfig, nil + return portConfigResult, nil } func getHealthCheckType(service *v1.Service) (linodego.ConfigCheck, error) { @@ -914,13 +1007,17 @@ func getPortConfigAnnotation(service *v1.Service, port int) (portConfigAnnotatio return annotation, nil } -// getNodePrivateIP should provide the Linode Private IP the NodeBalance -// will communicate with. When using a VLAN or VPC for the Kubernetes cluster -// network, this will not be the NodeInternalIP, so this prefers an annotation -// cluster operators may specify in such a situation. -func getNodePrivateIP(node *v1.Node) string { - if address, exists := node.Annotations[annotations.AnnLinodeNodePrivateIP]; exists { - return address +// getNodePrivateIP provides the Linode Backend IP the NodeBalancer will communicate with. 
+// If a service specifies NodeBalancerBackendIPv4Range annotation, it will +// use NodeInternalIP of node. +// For services which don't have NodeBalancerBackendIPv4Range annotation, +// Backend IP can be overwritten to the one specified using AnnLinodeNodePrivateIP +// annotation over the NodeInternalIP. +func getNodePrivateIP(node *v1.Node, subnetID int) string { + if subnetID == 0 { + if address, exists := node.Annotations[annotations.AnnLinodeNodePrivateIP]; exists { + return address + } } klog.Infof("Node %s, assigned IP addresses: %v", node.Name, node.Status.Addresses) @@ -962,8 +1059,8 @@ func getConnectionThrottle(service *v1.Service) int { parsed = 0 } - if parsed > 20 { - parsed = 20 + if parsed > maxConnThrottleStringLen { + parsed = maxConnThrottleStringLen } connThrottle = parsed } @@ -976,13 +1073,44 @@ func makeLoadBalancerStatus(service *v1.Service, nb *linodego.NodeBalancer) *v1. ingress := v1.LoadBalancerIngress{ Hostname: *nb.Hostname, } - if !getServiceBoolAnnotation(service, annotations.AnnLinodeHostnameOnlyIngress) { - if val := envBoolOptions("LINODE_HOSTNAME_ONLY_INGRESS"); val { - klog.Infof("LINODE_HOSTNAME_ONLY_INGRESS: (%v)", val) - } else { - ingress.IP = *nb.IPv4 + + // Return hostname-only if annotation is set or environment variable is set + if getServiceBoolAnnotation(service, annotations.AnnLinodeHostnameOnlyIngress) { + return &v1.LoadBalancerStatus{ + Ingress: []v1.LoadBalancerIngress{ingress}, } } + + if val := envBoolOptions("LINODE_HOSTNAME_ONLY_INGRESS"); val { + klog.Infof("LINODE_HOSTNAME_ONLY_INGRESS: (%v)", val) + return &v1.LoadBalancerStatus{ + Ingress: []v1.LoadBalancerIngress{ingress}, + } + } + + // Check for per-service IPv6 annotation first, then fall back to global setting + useIPv6 := getServiceBoolAnnotation(service, annotations.AnnLinodeEnableIPv6Ingress) || Options.EnableIPv6ForLoadBalancers + + // When IPv6 is enabled (either per-service or globally), include both IPv4 and IPv6 + if useIPv6 && nb.IPv6 != nil && *nb.IPv6 != "" { + ingresses := []v1.LoadBalancerIngress{ + { + Hostname: *nb.Hostname, + IP: *nb.IPv4, + }, + { + Hostname: *nb.Hostname, + IP: *nb.IPv6, + }, + } + klog.V(4).Infof("Using both IPv4 and IPv6 addresses for NodeBalancer (%d): %s, %s", nb.ID, *nb.IPv4, *nb.IPv6) + return &v1.LoadBalancerStatus{ + Ingress: ingresses, + } + } + + // Default case - just use IPv4 + ingress.IP = *nb.IPv4 return &v1.LoadBalancerStatus{ Ingress: []v1.LoadBalancerIngress{ingress}, } @@ -1010,3 +1138,32 @@ func getServiceBoolAnnotation(service *v1.Service, name string) bool { boolValue, err := strconv.ParseBool(value) return err == nil && boolValue } + +// validateNodeBalancerBackendIPv4Range validates the NodeBalancerBackendIPv4Range +// annotation to be within the NodeBalancerBackendIPv4Subnet if it is set. +func validateNodeBalancerBackendIPv4Range(backendIPv4Range string) error { + if Options.NodeBalancerBackendIPv4Subnet == "" { + return nil + } + withinCIDR, err := isCIDRWithinCIDR(Options.NodeBalancerBackendIPv4Subnet, backendIPv4Range) + if err != nil { + return fmt.Errorf("invalid IPv4 range: %w", err) + } + if !withinCIDR { + return fmt.Errorf("IPv4 range %s is not within the subnet %s", backendIPv4Range, Options.NodeBalancerBackendIPv4Subnet) + } + return nil +} + +// isCIDRWithinCIDR returns true if the inner CIDR is within the outer CIDR. 
+func isCIDRWithinCIDR(outer, inner string) (bool, error) { + _, ipNet1, err := net.ParseCIDR(outer) + if err != nil { + return false, fmt.Errorf("invalid CIDR: %w", err) + } + _, ipNet2, err := net.ParseCIDR(inner) + if err != nil { + return false, fmt.Errorf("invalid CIDR: %w", err) + } + return ipNet1.Contains(ipNet2.IP), nil +} diff --git a/cloud/linode/loadbalancers_test.go b/cloud/linode/loadbalancers_test.go index 6657b35f..83cd431a 100644 --- a/cloud/linode/loadbalancers_test.go +++ b/cloud/linode/loadbalancers_test.go @@ -17,6 +17,7 @@ import ( "strings" "testing" + ciliumclient "github.com/cilium/cilium/pkg/k8s/client/clientset/versioned/typed/cilium.io/v2alpha1" "github.com/linode/linodego" v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" @@ -27,6 +28,7 @@ import ( "k8s.io/client-go/kubernetes/fake" "github.com/linode/linode-cloud-controller-manager/cloud/annotations" + "github.com/linode/linode-cloud-controller-manager/cloud/linode/client" "github.com/linode/linode-cloud-controller-manager/cloud/linode/firewall" ) @@ -113,6 +115,8 @@ o/aoxqmE0mN1lyCPOa9UP//LlsREkWVKI3+Wld/xERtzf66hjcH+ilsXDxxpMEXo bSiPJQsGIKtQvyCaZY2szyOoeUGgOId+He7ITlezxKrjdj+1pLMESvAxKeo= -----END RSA PRIVATE KEY-----` +const drop string = "DROP" + func TestCCMLoadBalancers(t *testing.T) { testCases := []struct { name string @@ -150,6 +154,18 @@ func TestCCMLoadBalancers(t *testing.T) { name: "Create Load Balancer With Invalid Firewall ACL - NO Allow Or Deny", f: testCreateNodeBalanceWithNoAllowOrDenyList, }, + { + name: "Create Load Balancer With VPC Backend", + f: testCreateNodeBalancerWithVPCBackend, + }, + { + name: "Update Load Balancer With VPC Backend", + f: testUpdateNodeBalancerWithVPCBackend, + }, + { + name: "Create Load Balancer With VPC Backend - Overwrite VPC Name and Subnet with Annotation", + f: testCreateNodeBalancerWithVPCAnnotationOverwrite, + }, { name: "Create Load Balancer With Global Tags set", f: testCreateNodeBalancerWithGlobalTags, @@ -246,6 +262,10 @@ func TestCCMLoadBalancers(t *testing.T) { name: "makeLoadBalancerStatus", f: testMakeLoadBalancerStatus, }, + { + name: "makeLoadBalancerStatusWithIPv6", + f: testMakeLoadBalancerStatusWithIPv6, + }, { name: "makeLoadBalancerStatusEnvVar", f: testMakeLoadBalancerStatusEnvVar, @@ -258,6 +278,10 @@ func TestCCMLoadBalancers(t *testing.T) { name: "Update Load Balancer - No Nodes", f: testUpdateLoadBalancerNoNodes, }, + { + name: "Create Load Balancer - Very long Service name", + f: testVeryLongServiceName, + }, } for _, tc := range testCases { @@ -279,6 +303,8 @@ func stubService(fake *fake.Clientset, service *v1.Service) { } func testCreateNodeBalancer(t *testing.T, client *linodego.Client, _ *fakeAPI, annMap map[string]string, expectedTags []string) error { + t.Helper() + svc := &v1.Service{ ObjectMeta: metav1.ObjectMeta{ Name: randString(), @@ -308,11 +334,14 @@ func testCreateNodeBalancer(t *testing.T, client *linodego.Client, _ *fakeAPI, a for key, value := range annMap { svc.Annotations[key] = value } - lb := newLoadbalancers(client, "us-west").(*loadbalancers) + lb, assertion := newLoadbalancers(client, "us-west").(*loadbalancers) + if !assertion { + t.Error("type assertion failed") + } nodes := []*v1.Node{ {ObjectMeta: metav1.ObjectMeta{Name: "node-1"}}, } - nb, err := lb.buildLoadBalancerRequest(context.TODO(), "linodelb", svc, nodes) + nb, err := lb.buildLoadBalancerRequest(t.Context(), "linodelb", svc, nodes) if err != nil { return err } @@ -323,7 +352,7 @@ func testCreateNodeBalancer(t *testing.T, client 
*linodego.Client, _ *fakeAPI, a t.Logf("actual: %s", nb.Region) } - configs, err := client.ListNodeBalancerConfigs(context.TODO(), nb.ID, nil) + configs, err := client.ListNodeBalancerConfigs(t.Context(), nb.ID, nil) if err != nil { return err } @@ -334,7 +363,7 @@ func testCreateNodeBalancer(t *testing.T, client *linodego.Client, _ *fakeAPI, a t.Logf("actual: %v", len(configs)) } - nb, err = client.GetNodeBalancer(context.TODO(), nb.ID) + nb, err = client.GetNodeBalancer(t.Context(), nb.ID) if err != nil { return err } @@ -357,7 +386,7 @@ func testCreateNodeBalancer(t *testing.T, client *linodego.Client, _ *fakeAPI, a _, ok := annMap[annotations.AnnLinodeCloudFirewallACL] if ok { // a firewall was configured for this - firewalls, err := client.ListNodeBalancerFirewalls(context.TODO(), nb.ID, &linodego.ListOptions{}) + firewalls, err := client.ListNodeBalancerFirewalls(t.Context(), nb.ID, &linodego.ListOptions{}) if err != nil { t.Errorf("Expected nil error, got %v", err) } @@ -367,11 +396,13 @@ func testCreateNodeBalancer(t *testing.T, client *linodego.Client, _ *fakeAPI, a } } - defer func() { _ = lb.EnsureLoadBalancerDeleted(context.TODO(), "linodelb", svc) }() + defer func() { _ = lb.EnsureLoadBalancerDeleted(t.Context(), "linodelb", svc) }() return nil } func testCreateNodeBalancerWithOutFirewall(t *testing.T, client *linodego.Client, f *fakeAPI) { + t.Helper() + err := testCreateNodeBalancer(t, client, f, nil, nil) if err != nil { t.Fatalf("expected a nil error, got %v", err) @@ -379,6 +410,8 @@ func testCreateNodeBalancerWithOutFirewall(t *testing.T, client *linodego.Client } func testCreateNodeBalanceWithNoAllowOrDenyList(t *testing.T, client *linodego.Client, f *fakeAPI) { + t.Helper() + annotations := map[string]string{ annotations.AnnLinodeCloudFirewallACL: `{}`, } @@ -390,6 +423,8 @@ func testCreateNodeBalanceWithNoAllowOrDenyList(t *testing.T, client *linodego.C } func testCreateNodeBalanceWithBothAllowOrDenyList(t *testing.T, client *linodego.Client, f *fakeAPI) { + t.Helper() + annotations := map[string]string{ annotations.AnnLinodeCloudFirewallACL: `{ "allowList": { @@ -410,6 +445,8 @@ func testCreateNodeBalanceWithBothAllowOrDenyList(t *testing.T, client *linodego } func testCreateNodeBalancerWithAllowList(t *testing.T, client *linodego.Client, f *fakeAPI) { + t.Helper() + annotations := map[string]string{ annotations.AnnLinodeCloudFirewallACL: `{ "allowList": { @@ -426,6 +463,8 @@ func testCreateNodeBalancerWithAllowList(t *testing.T, client *linodego.Client, } func testCreateNodeBalancerWithDenyList(t *testing.T, client *linodego.Client, f *fakeAPI) { + t.Helper() + annotations := map[string]string{ annotations.AnnLinodeCloudFirewallACL: `{ "denyList": { @@ -442,6 +481,8 @@ func testCreateNodeBalancerWithDenyList(t *testing.T, client *linodego.Client, f } func testCreateNodeBalancerWithFirewall(t *testing.T, client *linodego.Client, f *fakeAPI) { + t.Helper() + annotations := map[string]string{ annotations.AnnLinodeCloudFirewallID: "123", } @@ -452,6 +493,8 @@ func testCreateNodeBalancerWithFirewall(t *testing.T, client *linodego.Client, f } func testCreateNodeBalancerWithInvalidFirewall(t *testing.T, client *linodego.Client, f *fakeAPI) { + t.Helper() + annotations := map[string]string{ annotations.AnnLinodeCloudFirewallID: "qwerty", } @@ -463,6 +506,8 @@ func testCreateNodeBalancerWithInvalidFirewall(t *testing.T, client *linodego.Cl } func testCreateNodeBalancerWithGlobalTags(t *testing.T, client *linodego.Client, f *fakeAPI) { + t.Helper() + original := 
Options.NodeBalancerTags defer func() { Options.NodeBalancerTags = original @@ -475,7 +520,193 @@ func testCreateNodeBalancerWithGlobalTags(t *testing.T, client *linodego.Client, } } +func testCreateNodeBalancerWithVPCBackend(t *testing.T, client *linodego.Client, f *fakeAPI) { + t.Helper() + + // test when no VPCs are present + ann := map[string]string{ + annotations.NodeBalancerBackendIPv4Range: "10.100.0.0/30", + } + if err := testCreateNodeBalancer(t, client, f, ann, nil); err == nil { + t.Fatalf("expected nodebalancer creation to fail") + } + + f.ResetRequests() + + // provision vpc and test again + vpcNames := Options.VPCNames + subnetNames := Options.SubnetNames + defer func() { + Options.VPCNames = vpcNames + Options.SubnetNames = subnetNames + }() + Options.VPCNames = "test1" + Options.SubnetNames = "default" + _, _ = client.CreateVPC(t.Context(), linodego.VPCCreateOptions{ + Label: "test1", + Description: "", + Region: "us-west", + Subnets: []linodego.VPCSubnetCreateOptions{ + { + Label: "default", + IPv4: "10.0.0.0/8", + }, + }, + }) + + err := testCreateNodeBalancer(t, client, f, ann, nil) + if err != nil { + t.Fatalf("expected a nil error, got %v", err) + } + + f.ResetRequests() + + // test with IPv4Range outside of defined NodeBalancer subnet + nodebalancerBackendIPv4Subnet := Options.NodeBalancerBackendIPv4Subnet + defer func() { + Options.NodeBalancerBackendIPv4Subnet = nodebalancerBackendIPv4Subnet + }() + Options.NodeBalancerBackendIPv4Subnet = "10.99.0.0/24" + if err := testCreateNodeBalancer(t, client, f, ann, nil); err == nil { + t.Fatalf("expected nodebalancer creation to fail") + } +} + +func testUpdateNodeBalancerWithVPCBackend(t *testing.T, client *linodego.Client, f *fakeAPI) { + t.Helper() + + // provision vpc and test + vpcNames := Options.VPCNames + subnetNames := Options.SubnetNames + defer func() { + Options.VPCNames = vpcNames + Options.SubnetNames = subnetNames + }() + Options.VPCNames = "test1" + Options.SubnetNames = "default" + _, _ = client.CreateVPC(t.Context(), linodego.VPCCreateOptions{ + Label: "test1", + Description: "", + Region: "us-west", + Subnets: []linodego.VPCSubnetCreateOptions{ + { + Label: "default", + IPv4: "10.0.0.0/8", + }, + }, + }) + + svc := &v1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: randString(), + UID: "foobar123", + Annotations: map[string]string{ + annotations.NodeBalancerBackendIPv4Range: "10.100.0.0/30", + }, + }, + Spec: v1.ServiceSpec{ + Ports: []v1.ServicePort{ + { + Name: randString(), + Protocol: "TCP", + Port: int32(80), + NodePort: int32(30000), + }, + }, + }, + } + + nodes := []*v1.Node{ + { + Status: v1.NodeStatus{ + Addresses: []v1.NodeAddress{ + { + Type: v1.NodeInternalIP, + Address: "127.0.0.1", + }, + }, + }, + }, + } + + lb, assertion := newLoadbalancers(client, "us-west").(*loadbalancers) + if !assertion { + t.Error("type assertion failed") + } + fakeClientset := fake.NewSimpleClientset() + lb.kubeClient = fakeClientset + + defer func() { + _ = lb.EnsureLoadBalancerDeleted(t.Context(), "linodelb", svc) + }() + + lbStatus, err := lb.EnsureLoadBalancer(t.Context(), "linodelb", svc, nodes) + if err != nil { + t.Errorf("EnsureLoadBalancer returned an error: %s", err) + } + svc.Status.LoadBalancer = *lbStatus + + stubService(fakeClientset, svc) + svc.ObjectMeta.SetAnnotations(map[string]string{ + annotations.NodeBalancerBackendIPv4Range: "10.100.1.0/30", + }) + + err = lb.UpdateLoadBalancer(t.Context(), "linodelb", svc, nodes) + if err != nil { + t.Errorf("UpdateLoadBalancer returned an error while 
updated annotations: %s", err) + } +} + +func testCreateNodeBalancerWithVPCAnnotationOverwrite(t *testing.T, client *linodego.Client, f *fakeAPI) { + t.Helper() + + // provision multiple vpcs + vpcNames := Options.VPCNames + nodebalancerBackendIPv4Subnet := Options.NodeBalancerBackendIPv4Subnet + defer func() { + Options.VPCNames = vpcNames + Options.NodeBalancerBackendIPv4Subnet = nodebalancerBackendIPv4Subnet + }() + Options.VPCNames = "test1" + Options.NodeBalancerBackendIPv4Subnet = "10.100.0.0/24" + + _, _ = client.CreateVPC(t.Context(), linodego.VPCCreateOptions{ + Label: "test1", + Description: "", + Region: "us-west", + Subnets: []linodego.VPCSubnetCreateOptions{ + { + Label: "default", + IPv4: "10.0.0.0/8", + }, + }, + }) + + _, _ = client.CreateVPC(t.Context(), linodego.VPCCreateOptions{ + Label: "test2", + Description: "", + Region: "us-west", + Subnets: []linodego.VPCSubnetCreateOptions{ + { + Label: "subnet1", + IPv4: "10.0.0.0/8", + }, + }, + }) + ann := map[string]string{ + annotations.NodeBalancerBackendIPv4Range: "10.100.0.0/30", + annotations.NodeBalancerBackendVPCName: "test2", + annotations.NodeBalancerBackendSubnetName: "subnet1", + } + err := testCreateNodeBalancer(t, client, f, ann, nil) + if err != nil { + t.Fatalf("expected a nil error, got %v", err) + } +} + func testUpdateLoadBalancerAddNode(t *testing.T, client *linodego.Client, f *fakeAPI) { + t.Helper() + svc := &v1.Service{ ObjectMeta: metav1.ObjectMeta{ Name: randString(), @@ -539,15 +770,18 @@ func testUpdateLoadBalancerAddNode(t *testing.T, client *linodego.Client, f *fak }, } - lb := newLoadbalancers(client, "us-west").(*loadbalancers) + lb, assertion := newLoadbalancers(client, "us-west").(*loadbalancers) + if !assertion { + t.Error("type assertion failed") + } fakeClientset := fake.NewSimpleClientset() lb.kubeClient = fakeClientset defer func() { - _ = lb.EnsureLoadBalancerDeleted(context.TODO(), "linodelb", svc) + _ = lb.EnsureLoadBalancerDeleted(t.Context(), "linodelb", svc) }() - lbStatus, err := lb.EnsureLoadBalancer(context.TODO(), "linodelb", svc, nodes1) + lbStatus, err := lb.EnsureLoadBalancer(t.Context(), "linodelb", svc, nodes1) if err != nil { t.Errorf("EnsureLoadBalancer returned an error %s", err) } @@ -557,7 +791,7 @@ func testUpdateLoadBalancerAddNode(t *testing.T, client *linodego.Client, f *fak f.ResetRequests() - err = lb.UpdateLoadBalancer(context.TODO(), "linodelb", svc, nodes1) + err = lb.UpdateLoadBalancer(t.Context(), "linodelb", svc, nodes1) if err != nil { t.Errorf("UpdateLoadBalancer returned an error while updated LB to have one node: %s", err) } @@ -574,11 +808,12 @@ func testUpdateLoadBalancerAddNode(t *testing.T, client *linodego.Client, f *fak if req == nil { t.Fatalf("Nodebalancer config rebuild request was not called.") + return 0, 0 // explicitly return to satisfy staticcheck } var nbcro linodego.NodeBalancerConfigRebuildOptions - if err := json.Unmarshal([]byte(req.Body), &nbcro); err != nil { + if err = json.Unmarshal([]byte(req.Body), &nbcro); err != nil { t.Fatalf("Unable to unmarshall request body %#v, error: %#v", req.Body, err) } @@ -601,7 +836,7 @@ func testUpdateLoadBalancerAddNode(t *testing.T, client *linodego.Client, f *fak } f.ResetRequests() - err = lb.UpdateLoadBalancer(context.TODO(), "linodelb", svc, nodes2) + err = lb.UpdateLoadBalancer(t.Context(), "linodelb", svc, nodes2) if err != nil { t.Errorf("UpdateLoadBalancer returned an error while updated LB to have three nodes: %s", err) } @@ -614,7 +849,7 @@ func testUpdateLoadBalancerAddNode(t *testing.T, 
client *linodego.Client, f *fak } f.ResetRequests() - err = lb.UpdateLoadBalancer(context.TODO(), "linodelb", svc, nodes2) + err = lb.UpdateLoadBalancer(t.Context(), "linodelb", svc, nodes2) if err != nil { t.Errorf("UpdateLoadBalancer returned an error while updated LB to have three nodes second time: %s", err) } @@ -626,12 +861,12 @@ func testUpdateLoadBalancerAddNode(t *testing.T, client *linodego.Client, f *fak t.Fatalf("Expected ID to be set just on all three nodes when updating the NB with all three nodes which were pre-existing, instead it is set on %d nodes", nodeswithIdcount) } - nb, err := lb.getNodeBalancerByStatus(context.TODO(), svc) + nb, err := lb.getNodeBalancerByStatus(t.Context(), svc) if err != nil { t.Fatalf("failed to get NodeBalancer via status: %s", err) } - cfgs, errConfigs := client.ListNodeBalancerConfigs(context.TODO(), nb.ID, nil) + cfgs, errConfigs := client.ListNodeBalancerConfigs(t.Context(), nb.ID, nil) if errConfigs != nil { t.Fatalf("error getting NodeBalancer configs: %v", errConfigs) } @@ -643,7 +878,7 @@ func testUpdateLoadBalancerAddNode(t *testing.T, client *linodego.Client, f *fak observedPorts := make(map[int]struct{}) for _, cfg := range cfgs { - nbnodes, errNodes := client.ListNodeBalancerNodes(context.TODO(), nb.ID, cfg.ID, nil) + nbnodes, errNodes := client.ListNodeBalancerNodes(t.Context(), nb.ID, cfg.ID, nil) if errNodes != nil { t.Errorf("error getting NodeBalancer nodes: %v", errNodes) } @@ -665,6 +900,8 @@ func testUpdateLoadBalancerAddNode(t *testing.T, client *linodego.Client, f *fak } func testUpdateLoadBalancerAddAnnotation(t *testing.T, client *linodego.Client, _ *fakeAPI) { + t.Helper() + svc := &v1.Service{ ObjectMeta: metav1.ObjectMeta{ Name: randString(), @@ -698,15 +935,18 @@ func testUpdateLoadBalancerAddAnnotation(t *testing.T, client *linodego.Client, }, } - lb := newLoadbalancers(client, "us-west").(*loadbalancers) + lb, assertion := newLoadbalancers(client, "us-west").(*loadbalancers) + if !assertion { + t.Error("type assertion failed") + } fakeClientset := fake.NewSimpleClientset() lb.kubeClient = fakeClientset defer func() { - _ = lb.EnsureLoadBalancerDeleted(context.TODO(), "linodelb", svc) + _ = lb.EnsureLoadBalancerDeleted(t.Context(), "linodelb", svc) }() - lbStatus, err := lb.EnsureLoadBalancer(context.TODO(), "linodelb", svc, nodes) + lbStatus, err := lb.EnsureLoadBalancer(t.Context(), "linodelb", svc, nodes) if err != nil { t.Errorf("EnsureLoadBalancer returned an error: %s", err) } @@ -717,12 +957,12 @@ func testUpdateLoadBalancerAddAnnotation(t *testing.T, client *linodego.Client, annotations.AnnLinodeThrottle: "10", }) - err = lb.UpdateLoadBalancer(context.TODO(), "linodelb", svc, nodes) + err = lb.UpdateLoadBalancer(t.Context(), "linodelb", svc, nodes) if err != nil { t.Errorf("UpdateLoadBalancer returned an error while updated annotations: %s", err) } - nb, err := lb.getNodeBalancerByStatus(context.TODO(), svc) + nb, err := lb.getNodeBalancerByStatus(t.Context(), svc) if err != nil { t.Fatalf("failed to get NodeBalancer via status: %s", err) } @@ -735,6 +975,8 @@ func testUpdateLoadBalancerAddAnnotation(t *testing.T, client *linodego.Client, } func testUpdateLoadBalancerAddPortAnnotation(t *testing.T, client *linodego.Client, _ *fakeAPI) { + t.Helper() + targetTestPort := 80 portConfigAnnotation := fmt.Sprintf("%s%d", annotations.AnnLinodePortConfigPrefix, targetTestPort) svc := &v1.Service{ @@ -768,15 +1010,18 @@ func testUpdateLoadBalancerAddPortAnnotation(t *testing.T, client *linodego.Clie }, } - lb := 
newLoadbalancers(client, "us-west").(*loadbalancers) + lb, assertion := newLoadbalancers(client, "us-west").(*loadbalancers) + if !assertion { + t.Error("type assertion failed") + } fakeClientset := fake.NewSimpleClientset() lb.kubeClient = fakeClientset defer func() { - _ = lb.EnsureLoadBalancerDeleted(context.TODO(), "linodelb", svc) + _ = lb.EnsureLoadBalancerDeleted(t.Context(), "linodelb", svc) }() - lbStatus, err := lb.EnsureLoadBalancer(context.TODO(), "linodelb", svc, nodes) + lbStatus, err := lb.EnsureLoadBalancer(t.Context(), "linodelb", svc, nodes) if err != nil { t.Errorf("EnsureLoadBalancer returned an error: %s", err) } @@ -787,17 +1032,17 @@ func testUpdateLoadBalancerAddPortAnnotation(t *testing.T, client *linodego.Clie portConfigAnnotation: `{"protocol": "http"}`, }) - err = lb.UpdateLoadBalancer(context.TODO(), "linodelb", svc, nodes) + err = lb.UpdateLoadBalancer(t.Context(), "linodelb", svc, nodes) if err != nil { t.Fatalf("UpdateLoadBalancer returned an error while updated annotations: %s", err) } - nb, err := lb.getNodeBalancerByStatus(context.TODO(), svc) + nb, err := lb.getNodeBalancerByStatus(t.Context(), svc) if err != nil { t.Fatalf("failed to get NodeBalancer by status: %v", err) } - cfgs, errConfigs := client.ListNodeBalancerConfigs(context.TODO(), nb.ID, nil) + cfgs, errConfigs := client.ListNodeBalancerConfigs(t.Context(), nb.ID, nil) if errConfigs != nil { t.Fatalf("error getting NodeBalancer configs: %v", errConfigs) } @@ -816,7 +1061,103 @@ func testUpdateLoadBalancerAddPortAnnotation(t *testing.T, client *linodego.Clie } } +func testVeryLongServiceName(t *testing.T, client *linodego.Client, _ *fakeAPI) { + t.Helper() + + ipv4DenyList := make([]string, 130) + ipv6DenyList := make([]string, 130) + + for i := 0; i < len(ipv4DenyList); i++ { + ipv4DenyList[i] = fmt.Sprintf("192.168.1.%d/32", i) + ipv6DenyList[i] = fmt.Sprintf("2001:db8::%x/128", i) + } + + var jsonV4DenyList, jsonV6DenyList []byte + jsonV4DenyList, err := json.Marshal(ipv4DenyList) + if err != nil { + t.Error("Could not marshal ipv4DenyList into json") + } + jsonV6DenyList, err = json.Marshal(ipv6DenyList) + if err != nil { + t.Error("Could not marshal ipv6DenyList into json") + } + + denyListJSON := fmt.Sprintf(`{ + "denyList": { + "ipv4": %s, + "ipv6": %s + } + }`, string(jsonV4DenyList), string(jsonV6DenyList)) + + svc := &v1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: strings.Repeat(randString(), 6), + UID: "foobar123", + Annotations: map[string]string{ + annotations.AnnLinodeCloudFirewallACL: denyListJSON, + }, + }, + Spec: v1.ServiceSpec{ + Ports: []v1.ServicePort{ + { + Name: randString(), + Protocol: "TCP", + Port: int32(80), + NodePort: int32(30000), + }, + }, + }, + } + + nodes := []*v1.Node{ + { + Status: v1.NodeStatus{ + Addresses: []v1.NodeAddress{ + { + Type: v1.NodeInternalIP, + Address: "127.0.0.1", + }, + }, + }, + }, + } + + lb, assertion := newLoadbalancers(client, "us-west").(*loadbalancers) + if !assertion { + t.Error("type assertion failed") + } + fakeClientset := fake.NewSimpleClientset() + lb.kubeClient = fakeClientset + + defer func() { + _ = lb.EnsureLoadBalancerDeleted(t.Context(), "linodelb", svc) + }() + + lbStatus, err := lb.EnsureLoadBalancer(t.Context(), "linodelb", svc, nodes) + if err != nil { + t.Errorf("EnsureLoadBalancer returned an error: %s", err) + } + svc.Status.LoadBalancer = *lbStatus + stubService(fakeClientset, svc) + + svc.ObjectMeta.SetAnnotations(map[string]string{ + annotations.AnnLinodeCloudFirewallACL: `{ + "denyList": { + "ipv4": 
["192.168.1.0/32"], + "ipv6": ["2001:db8::/128"] + } + }`, + }) + + err = lb.UpdateLoadBalancer(t.Context(), "linodelb", svc, nodes) + if err != nil { + t.Fatalf("UpdateLoadBalancer returned an error with updated annotations: %s", err) + } +} + func testUpdateLoadBalancerAddTags(t *testing.T, client *linodego.Client, _ *fakeAPI) { + t.Helper() + svc := &v1.Service{ ObjectMeta: metav1.ObjectMeta{ Name: randString(), @@ -848,16 +1189,19 @@ func testUpdateLoadBalancerAddTags(t *testing.T, client *linodego.Client, _ *fak }, } - lb := newLoadbalancers(client, "us-west").(*loadbalancers) + lb, assertion := newLoadbalancers(client, "us-west").(*loadbalancers) + if !assertion { + t.Error("type assertion failed") + } fakeClientset := fake.NewSimpleClientset() lb.kubeClient = fakeClientset clusterName := "linodelb" defer func() { - _ = lb.EnsureLoadBalancerDeleted(context.TODO(), clusterName, svc) + _ = lb.EnsureLoadBalancerDeleted(t.Context(), clusterName, svc) }() - lbStatus, err := lb.EnsureLoadBalancer(context.TODO(), clusterName, svc, nodes) + lbStatus, err := lb.EnsureLoadBalancer(t.Context(), clusterName, svc, nodes) if err != nil { t.Errorf("EnsureLoadBalancer returned an error: %s", err) } @@ -869,12 +1213,12 @@ func testUpdateLoadBalancerAddTags(t *testing.T, client *linodego.Client, _ *fak annotations.AnnLinodeLoadBalancerTags: testTags, }) - err = lb.UpdateLoadBalancer(context.TODO(), clusterName, svc, nodes) + err = lb.UpdateLoadBalancer(t.Context(), clusterName, svc, nodes) if err != nil { t.Fatalf("UpdateLoadBalancer returned an error while updated annotations: %s", err) } - nb, err := lb.getNodeBalancerByStatus(context.TODO(), svc) + nb, err := lb.getNodeBalancerByStatus(t.Context(), svc) if err != nil { t.Fatalf("failed to get NodeBalancer by status: %v", err) } @@ -888,6 +1232,8 @@ func testUpdateLoadBalancerAddTags(t *testing.T, client *linodego.Client, _ *fak } func testUpdateLoadBalancerAddTLSPort(t *testing.T, client *linodego.Client, _ *fakeAPI) { + t.Helper() + svc := &v1.Service{ ObjectMeta: metav1.ObjectMeta{ Name: randString(), @@ -928,17 +1274,20 @@ func testUpdateLoadBalancerAddTLSPort(t *testing.T, client *linodego.Client, _ * NodePort: int32(30001), } - lb := newLoadbalancers(client, "us-west").(*loadbalancers) + lb, assertion := newLoadbalancers(client, "us-west").(*loadbalancers) + if !assertion { + t.Error("type assertion failed") + } defer func() { - _ = lb.EnsureLoadBalancerDeleted(context.TODO(), "linodelb", svc) + _ = lb.EnsureLoadBalancerDeleted(t.Context(), "linodelb", svc) }() fakeClientset := fake.NewSimpleClientset() lb.kubeClient = fakeClientset addTLSSecret(t, lb.kubeClient) - lbStatus, err := lb.EnsureLoadBalancer(context.TODO(), "linodelb", svc, nodes) + lbStatus, err := lb.EnsureLoadBalancer(t.Context(), "linodelb", svc, nodes) if err != nil { t.Errorf("EnsureLoadBalancer returned an error: %s", err) } @@ -949,17 +1298,17 @@ func testUpdateLoadBalancerAddTLSPort(t *testing.T, client *linodego.Client, _ * svc.ObjectMeta.SetAnnotations(map[string]string{ annotations.AnnLinodePortConfigPrefix + "443": `{ "protocol": "https", "tls-secret-name": "tls-secret"}`, }) - err = lb.UpdateLoadBalancer(context.TODO(), "linodelb", svc, nodes) + err = lb.UpdateLoadBalancer(t.Context(), "linodelb", svc, nodes) if err != nil { t.Fatalf("UpdateLoadBalancer returned an error while updated annotations: %s", err) } - nb, err := lb.getNodeBalancerByStatus(context.TODO(), svc) + nb, err := lb.getNodeBalancerByStatus(t.Context(), svc) if err != nil { t.Fatalf("unexpected 
error: %v", err) } - cfgs, errConfigs := client.ListNodeBalancerConfigs(context.TODO(), nb.ID, nil) + cfgs, errConfigs := client.ListNodeBalancerConfigs(t.Context(), nb.ID, nil) if errConfigs != nil { t.Fatalf("error getting NodeBalancer configs: %v", errConfigs) } @@ -972,7 +1321,7 @@ func testUpdateLoadBalancerAddTLSPort(t *testing.T, client *linodego.Client, _ * observedPorts := make(map[int]struct{}) for _, cfg := range cfgs { - nodes, errNodes := client.ListNodeBalancerNodes(context.TODO(), nb.ID, cfg.ID, nil) + nodes, errNodes := client.ListNodeBalancerNodes(t.Context(), nb.ID, cfg.ID, nil) if errNodes != nil { t.Errorf("error getting NodeBalancer nodes: %v", errNodes) } @@ -990,6 +1339,8 @@ func testUpdateLoadBalancerAddTLSPort(t *testing.T, client *linodego.Client, _ * } func testUpdateLoadBalancerAddProxyProtocol(t *testing.T, client *linodego.Client, fakeAPI *fakeAPI) { + t.Helper() + nodes := []*v1.Node{ { Status: v1.NodeStatus{ @@ -1003,7 +1354,10 @@ func testUpdateLoadBalancerAddProxyProtocol(t *testing.T, client *linodego.Clien }, } - lb := newLoadbalancers(client, "us-west").(*loadbalancers) + lb, assertion := newLoadbalancers(client, "us-west").(*loadbalancers) + if !assertion { + t.Error("type assertion failed") + } fakeClientset := fake.NewSimpleClientset() lb.kubeClient = fakeClientset @@ -1050,9 +1404,9 @@ func testUpdateLoadBalancerAddProxyProtocol(t *testing.T, client *linodego.Clien } defer func() { - _ = lb.EnsureLoadBalancerDeleted(context.TODO(), "linodelb", svc) + _ = lb.EnsureLoadBalancerDeleted(t.Context(), "linodelb", svc) }() - nodeBalancer, err := client.CreateNodeBalancer(context.TODO(), linodego.NodeBalancerCreateOptions{ + nodeBalancer, err := client.CreateNodeBalancer(t.Context(), linodego.NodeBalancerCreateOptions{ Region: lb.zone, }) if err != nil { @@ -1065,7 +1419,7 @@ func testUpdateLoadBalancerAddProxyProtocol(t *testing.T, client *linodego.Clien }) stubService(fakeClientset, svc) - if err = lb.UpdateLoadBalancer(context.TODO(), "linodelb", svc, nodes); err != nil { + if err = lb.UpdateLoadBalancer(t.Context(), "linodelb", svc, nodes); err != nil { expectedErrMessage := fmt.Sprintf("invalid NodeBalancer proxy protocol value '%s'", tc.proxyProtocolConfig) if tc.invalidErr && err.Error() == expectedErrMessage { return @@ -1077,7 +1431,7 @@ func testUpdateLoadBalancerAddProxyProtocol(t *testing.T, client *linodego.Clien t.Fatal("expected UpdateLoadBalancer to return an error") } - nodeBalancerConfigs, err := client.ListNodeBalancerConfigs(context.TODO(), nodeBalancer.ID, nil) + nodeBalancerConfigs, err := client.ListNodeBalancerConfigs(t.Context(), nodeBalancer.ID, nil) if err != nil { t.Fatalf("failed to get NodeBalancer: %s", err) } @@ -1093,6 +1447,8 @@ func testUpdateLoadBalancerAddProxyProtocol(t *testing.T, client *linodego.Clien } func testUpdateLoadBalancerAddNewFirewall(t *testing.T, client *linodego.Client, fakeAPI *fakeAPI) { + t.Helper() + svc := &v1.Service{ ObjectMeta: metav1.ObjectMeta{ Name: randString(), @@ -1126,22 +1482,25 @@ func testUpdateLoadBalancerAddNewFirewall(t *testing.T, client *linodego.Client, }, } - lb := newLoadbalancers(client, "us-west").(*loadbalancers) + lb, assertion := newLoadbalancers(client, "us-west").(*loadbalancers) + if !assertion { + t.Error("type assertion failed") + } fakeClientset := fake.NewSimpleClientset() lb.kubeClient = fakeClientset defer func() { - _ = lb.EnsureLoadBalancerDeleted(context.TODO(), "linodelb", svc) + _ = lb.EnsureLoadBalancerDeleted(t.Context(), "linodelb", svc) }() - lbStatus, err 
:= lb.EnsureLoadBalancer(context.TODO(), "linodelb", svc, nodes) + lbStatus, err := lb.EnsureLoadBalancer(t.Context(), "linodelb", svc, nodes) if err != nil { t.Errorf("EnsureLoadBalancer returned an error: %s", err) } svc.Status.LoadBalancer = *lbStatus stubService(fakeClientset, svc) fwClient := firewall.LinodeClient{Client: client} - fw, err := fwClient.CreateFirewall(context.TODO(), linodego.FirewallCreateOptions{ + fw, err := fwClient.CreateFirewall(t.Context(), linodego.FirewallCreateOptions{ Label: "test", Rules: linodego.FirewallRuleSet{Inbound: []linodego.FirewallRule{{ Action: "ACCEPT", @@ -1158,24 +1517,24 @@ func testUpdateLoadBalancerAddNewFirewall(t *testing.T, client *linodego.Client, t.Errorf("CreatingFirewall returned an error: %s", err) } defer func() { - _ = fwClient.DeleteFirewall(context.TODO(), fw) + _ = fwClient.DeleteFirewall(t.Context(), fw) }() svc.ObjectMeta.SetAnnotations(map[string]string{ annotations.AnnLinodeCloudFirewallID: strconv.Itoa(fw.ID), }) - err = lb.UpdateLoadBalancer(context.TODO(), "linodelb", svc, nodes) + err = lb.UpdateLoadBalancer(t.Context(), "linodelb", svc, nodes) if err != nil { t.Errorf("UpdateLoadBalancer returned an error while updated annotations: %s", err) } - nb, err := lb.getNodeBalancerByStatus(context.TODO(), svc) + nb, err := lb.getNodeBalancerByStatus(t.Context(), svc) if err != nil { t.Fatalf("failed to get NodeBalancer via status: %s", err) } - firewalls, err := lb.client.ListNodeBalancerFirewalls(context.TODO(), nb.ID, &linodego.ListOptions{}) + firewalls, err := lb.client.ListNodeBalancerFirewalls(t.Context(), nb.ID, &linodego.ListOptions{}) if err != nil { t.Fatalf("failed to List Firewalls %s", err) } @@ -1191,6 +1550,8 @@ func testUpdateLoadBalancerAddNewFirewall(t *testing.T, client *linodego.Client, // This will also test the firewall with >255 IPs func testUpdateLoadBalancerAddNewFirewallACL(t *testing.T, client *linodego.Client, fakeAPI *fakeAPI) { + t.Helper() + svc := &v1.Service{ ObjectMeta: metav1.ObjectMeta{ Name: randString(), @@ -1221,26 +1582,29 @@ func testUpdateLoadBalancerAddNewFirewallACL(t *testing.T, client *linodego.Clie }, } - lb := newLoadbalancers(client, "us-west").(*loadbalancers) + lb, assertion := newLoadbalancers(client, "us-west").(*loadbalancers) + if !assertion { + t.Error("type assertion failed") + } fakeClientset := fake.NewSimpleClientset() lb.kubeClient = fakeClientset defer func() { - _ = lb.EnsureLoadBalancerDeleted(context.TODO(), "linodelb", svc) + _ = lb.EnsureLoadBalancerDeleted(t.Context(), "linodelb", svc) }() - lbStatus, err := lb.EnsureLoadBalancer(context.TODO(), "linodelb", svc, nodes) + lbStatus, err := lb.EnsureLoadBalancer(t.Context(), "linodelb", svc, nodes) if err != nil { t.Errorf("EnsureLoadBalancer returned an error: %s", err) } svc.Status.LoadBalancer = *lbStatus stubService(fakeClientset, svc) - nb, err := lb.getNodeBalancerByStatus(context.TODO(), svc) + nb, err := lb.getNodeBalancerByStatus(t.Context(), svc) if err != nil { t.Fatalf("failed to get NodeBalancer via status: %s", err) } - firewalls, err := lb.client.ListNodeBalancerFirewalls(context.TODO(), nb.ID, &linodego.ListOptions{}) + firewalls, err := lb.client.ListNodeBalancerFirewalls(t.Context(), nb.ID, &linodego.ListOptions{}) if err != nil { t.Fatalf("Failed to list nodeBalancer firewalls %s", err) } @@ -1249,8 +1613,8 @@ func testUpdateLoadBalancerAddNewFirewallACL(t *testing.T, client *linodego.Clie t.Fatalf("Firewalls attached when none specified") } - var ipv4s []string - var ipv6s []string + ipv4s := 
make([]string, 0, 400) + ipv6s := make([]string, 0, 300) i := 0 for i < 400 { ipv4s = append(ipv4s, fmt.Sprintf("%d.%d.%d.%d", 192, rand.Int31n(255), rand.Int31n(255), rand.Int31n(255))) @@ -1259,7 +1623,7 @@ func testUpdateLoadBalancerAddNewFirewallACL(t *testing.T, client *linodego.Clie i = 0 for i < 300 { ip := make([]byte, 16) - if _, err := cryptoRand.Read(ip); err != nil { + if _, err = cryptoRand.Read(ip); err != nil { t.Fatalf("unable to read random bytes") } ipv6s = append(ipv6s, fmt.Sprintf("%s:%s:%s:%s:%s:%s:%s:%s", @@ -1288,17 +1652,17 @@ func testUpdateLoadBalancerAddNewFirewallACL(t *testing.T, client *linodego.Clie annotations.AnnLinodeCloudFirewallACL: string(aclString), }) - err = lb.UpdateLoadBalancer(context.TODO(), "linodelb", svc, nodes) + err = lb.UpdateLoadBalancer(t.Context(), "linodelb", svc, nodes) if err != nil { t.Errorf("UpdateLoadBalancer returned an error: %s", err) } - nbUpdated, err := lb.getNodeBalancerByStatus(context.TODO(), svc) + nbUpdated, err := lb.getNodeBalancerByStatus(t.Context(), svc) if err != nil { t.Fatalf("failed to get NodeBalancer via status: %s", err) } - firewallsNew, err := lb.client.ListNodeBalancerFirewalls(context.TODO(), nbUpdated.ID, &linodego.ListOptions{}) + firewallsNew, err := lb.client.ListNodeBalancerFirewalls(t.Context(), nbUpdated.ID, &linodego.ListOptions{}) if err != nil { t.Fatalf("failed to List Firewalls %s", err) } @@ -1307,7 +1671,7 @@ func testUpdateLoadBalancerAddNewFirewallACL(t *testing.T, client *linodego.Clie t.Fatalf("No firewalls found") } - if firewallsNew[0].Rules.InboundPolicy != "DROP" { + if firewallsNew[0].Rules.InboundPolicy != drop { t.Errorf("expected DROP inbound policy, got %s", firewallsNew[0].Rules.InboundPolicy) } @@ -1317,6 +1681,8 @@ func testUpdateLoadBalancerAddNewFirewallACL(t *testing.T, client *linodego.Clie } func testUpdateLoadBalancerDeleteFirewallRemoveACL(t *testing.T, client *linodego.Client, fakeAPI *fakeAPI) { + t.Helper() + svc := &v1.Service{ ObjectMeta: metav1.ObjectMeta{ Name: randString(), @@ -1347,7 +1713,10 @@ func testUpdateLoadBalancerDeleteFirewallRemoveACL(t *testing.T, client *linodeg }, } - lb := newLoadbalancers(client, "us-west").(*loadbalancers) + lb, assertion := newLoadbalancers(client, "us-west").(*loadbalancers) + if !assertion { + t.Error("type assertion failed") + } fakeClientset := fake.NewSimpleClientset() lb.kubeClient = fakeClientset @@ -1360,21 +1729,21 @@ func testUpdateLoadBalancerDeleteFirewallRemoveACL(t *testing.T, client *linodeg }) defer func() { - _ = lb.EnsureLoadBalancerDeleted(context.TODO(), "linodelb", svc) + _ = lb.EnsureLoadBalancerDeleted(t.Context(), "linodelb", svc) }() - lbStatus, err := lb.EnsureLoadBalancer(context.TODO(), "linodelb", svc, nodes) + lbStatus, err := lb.EnsureLoadBalancer(t.Context(), "linodelb", svc, nodes) if err != nil { t.Errorf("EnsureLoadBalancer returned an error: %s", err) } svc.Status.LoadBalancer = *lbStatus stubService(fakeClientset, svc) - nb, err := lb.getNodeBalancerByStatus(context.TODO(), svc) + nb, err := lb.getNodeBalancerByStatus(t.Context(), svc) if err != nil { t.Fatalf("failed to get NodeBalancer via status: %s", err) } - firewalls, err := lb.client.ListNodeBalancerFirewalls(context.TODO(), nb.ID, &linodego.ListOptions{}) + firewalls, err := lb.client.ListNodeBalancerFirewalls(t.Context(), nb.ID, &linodego.ListOptions{}) if err != nil { t.Fatalf("Failed to list nodeBalancer firewalls %s", err) } @@ -1383,7 +1752,7 @@ func testUpdateLoadBalancerDeleteFirewallRemoveACL(t *testing.T, client *linodeg 
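// The inbound-policy assertions in this hunk compare against drop, presumably a package-level
// constant for the "DROP" policy string, rather than repeating the raw literal.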
t.Fatalf("No firewalls attached") } - if firewalls[0].Rules.InboundPolicy != "DROP" { + if firewalls[0].Rules.InboundPolicy != drop { t.Errorf("expected DROP inbound policy, got %s", firewalls[0].Rules.InboundPolicy) } @@ -1394,12 +1763,12 @@ func testUpdateLoadBalancerDeleteFirewallRemoveACL(t *testing.T, client *linodeg svc.ObjectMeta.SetAnnotations(map[string]string{}) - err = lb.UpdateLoadBalancer(context.TODO(), "linodelb", svc, nodes) + err = lb.UpdateLoadBalancer(t.Context(), "linodelb", svc, nodes) if err != nil { t.Errorf("UpdateLoadBalancer returned an error: %s", err) } - firewallsNew, err := lb.client.ListNodeBalancerFirewalls(context.TODO(), nb.ID, &linodego.ListOptions{}) + firewallsNew, err := lb.client.ListNodeBalancerFirewalls(t.Context(), nb.ID, &linodego.ListOptions{}) if err != nil { t.Fatalf("failed to List Firewalls %s", err) } @@ -1410,6 +1779,8 @@ func testUpdateLoadBalancerDeleteFirewallRemoveACL(t *testing.T, client *linodeg } func testUpdateLoadBalancerUpdateFirewallRemoveACLaddID(t *testing.T, client *linodego.Client, fakeAPI *fakeAPI) { + t.Helper() + svc := &v1.Service{ ObjectMeta: metav1.ObjectMeta{ Name: randString(), @@ -1440,7 +1811,10 @@ func testUpdateLoadBalancerUpdateFirewallRemoveACLaddID(t *testing.T, client *li }, } - lb := newLoadbalancers(client, "us-west").(*loadbalancers) + lb, assertion := newLoadbalancers(client, "us-west").(*loadbalancers) + if !assertion { + t.Error("type assertion failed") + } fakeClientset := fake.NewSimpleClientset() lb.kubeClient = fakeClientset @@ -1453,21 +1827,21 @@ func testUpdateLoadBalancerUpdateFirewallRemoveACLaddID(t *testing.T, client *li }) defer func() { - _ = lb.EnsureLoadBalancerDeleted(context.TODO(), "linodelb", svc) + _ = lb.EnsureLoadBalancerDeleted(t.Context(), "linodelb", svc) }() - lbStatus, err := lb.EnsureLoadBalancer(context.TODO(), "linodelb", svc, nodes) + lbStatus, err := lb.EnsureLoadBalancer(t.Context(), "linodelb", svc, nodes) if err != nil { t.Errorf("EnsureLoadBalancer returned an error: %s", err) } svc.Status.LoadBalancer = *lbStatus stubService(fakeClientset, svc) - nb, err := lb.getNodeBalancerByStatus(context.TODO(), svc) + nb, err := lb.getNodeBalancerByStatus(t.Context(), svc) if err != nil { t.Fatalf("failed to get NodeBalancer via status: %s", err) } - firewalls, err := lb.client.ListNodeBalancerFirewalls(context.TODO(), nb.ID, &linodego.ListOptions{}) + firewalls, err := lb.client.ListNodeBalancerFirewalls(t.Context(), nb.ID, &linodego.ListOptions{}) if err != nil { t.Fatalf("Failed to list nodeBalancer firewalls %s", err) } @@ -1476,7 +1850,7 @@ func testUpdateLoadBalancerUpdateFirewallRemoveACLaddID(t *testing.T, client *li t.Fatalf("No firewalls attached") } - if firewalls[0].Rules.InboundPolicy != "DROP" { + if firewalls[0].Rules.InboundPolicy != drop { t.Errorf("expected DROP inbound policy, got %s", firewalls[0].Rules.InboundPolicy) } @@ -1486,7 +1860,7 @@ func testUpdateLoadBalancerUpdateFirewallRemoveACLaddID(t *testing.T, client *li } fwClient := firewall.LinodeClient{Client: client} - fw, err := fwClient.CreateFirewall(context.TODO(), linodego.FirewallCreateOptions{ + fw, err := fwClient.CreateFirewall(t.Context(), linodego.FirewallCreateOptions{ Label: "test", Rules: linodego.FirewallRuleSet{Inbound: []linodego.FirewallRule{{ Action: "ACCEPT", @@ -1503,24 +1877,24 @@ func testUpdateLoadBalancerUpdateFirewallRemoveACLaddID(t *testing.T, client *li t.Errorf("Error creating firewall %s", err) } defer func() { - _ = fwClient.DeleteFirewall(context.TODO(), fw) + _ = 
fwClient.DeleteFirewall(t.Context(), fw) }() svc.ObjectMeta.SetAnnotations(map[string]string{ annotations.AnnLinodeCloudFirewallID: strconv.Itoa(fw.ID), }) - err = lb.UpdateLoadBalancer(context.TODO(), "linodelb", svc, nodes) + err = lb.UpdateLoadBalancer(t.Context(), "linodelb", svc, nodes) if err != nil { t.Errorf("UpdateLoadBalancer returned an error: %s", err) } - nbUpdated, err := lb.getNodeBalancerByStatus(context.TODO(), svc) + nbUpdated, err := lb.getNodeBalancerByStatus(t.Context(), svc) if err != nil { t.Fatalf("failed to get NodeBalancer via status: %s", err) } - firewallsNew, err := lb.client.ListNodeBalancerFirewalls(context.TODO(), nbUpdated.ID, &linodego.ListOptions{}) + firewallsNew, err := lb.client.ListNodeBalancerFirewalls(t.Context(), nbUpdated.ID, &linodego.ListOptions{}) if err != nil { t.Fatalf("failed to List Firewalls %s", err) } @@ -1544,6 +1918,8 @@ func testUpdateLoadBalancerUpdateFirewallRemoveACLaddID(t *testing.T, client *li } func testUpdateLoadBalancerUpdateFirewallRemoveIDaddACL(t *testing.T, client *linodego.Client, fakeAPI *fakeAPI) { + t.Helper() + svc := &v1.Service{ ObjectMeta: metav1.ObjectMeta{ Name: randString(), @@ -1574,12 +1950,15 @@ func testUpdateLoadBalancerUpdateFirewallRemoveIDaddACL(t *testing.T, client *li }, } - lb := newLoadbalancers(client, "us-west").(*loadbalancers) + lb, assertion := newLoadbalancers(client, "us-west").(*loadbalancers) + if !assertion { + t.Error("type assertion failed") + } fakeClientset := fake.NewSimpleClientset() lb.kubeClient = fakeClientset fwClient := firewall.LinodeClient{Client: client} - fw, err := fwClient.CreateFirewall(context.TODO(), linodego.FirewallCreateOptions{ + fw, err := fwClient.CreateFirewall(t.Context(), linodego.FirewallCreateOptions{ Label: "test", Rules: linodego.FirewallRuleSet{Inbound: []linodego.FirewallRule{{ Action: "ACCEPT", @@ -1596,7 +1975,7 @@ func testUpdateLoadBalancerUpdateFirewallRemoveIDaddACL(t *testing.T, client *li t.Errorf("Error creating firewall %s", err) } defer func() { - _ = fwClient.DeleteFirewall(context.TODO(), fw) + _ = fwClient.DeleteFirewall(t.Context(), fw) }() svc.ObjectMeta.SetAnnotations(map[string]string{ @@ -1604,21 +1983,21 @@ func testUpdateLoadBalancerUpdateFirewallRemoveIDaddACL(t *testing.T, client *li }) defer func() { - _ = lb.EnsureLoadBalancerDeleted(context.TODO(), "linodelb", svc) + _ = lb.EnsureLoadBalancerDeleted(t.Context(), "linodelb", svc) }() - lbStatus, err := lb.EnsureLoadBalancer(context.TODO(), "linodelb", svc, nodes) + lbStatus, err := lb.EnsureLoadBalancer(t.Context(), "linodelb", svc, nodes) if err != nil { t.Errorf("EnsureLoadBalancer returned an error: %s", err) } svc.Status.LoadBalancer = *lbStatus stubService(fakeClientset, svc) - nb, err := lb.getNodeBalancerByStatus(context.TODO(), svc) + nb, err := lb.getNodeBalancerByStatus(t.Context(), svc) if err != nil { t.Fatalf("failed to get NodeBalancer via status: %s", err) } - firewalls, err := lb.client.ListNodeBalancerFirewalls(context.TODO(), nb.ID, &linodego.ListOptions{}) + firewalls, err := lb.client.ListNodeBalancerFirewalls(t.Context(), nb.ID, &linodego.ListOptions{}) if err != nil { t.Fatalf("Failed to list nodeBalancer firewalls %s", err) } @@ -1643,17 +2022,17 @@ func testUpdateLoadBalancerUpdateFirewallRemoveIDaddACL(t *testing.T, client *li }`, }) - err = lb.UpdateLoadBalancer(context.TODO(), "linodelb", svc, nodes) + err = lb.UpdateLoadBalancer(t.Context(), "linodelb", svc, nodes) if err != nil { t.Errorf("UpdateLoadBalancer returned an error: %s", err) } - nbUpdated, 
err := lb.getNodeBalancerByStatus(context.TODO(), svc) + nbUpdated, err := lb.getNodeBalancerByStatus(t.Context(), svc) if err != nil { t.Fatalf("failed to get NodeBalancer via status: %s", err) } - firewallsNew, err := lb.client.ListNodeBalancerFirewalls(context.TODO(), nbUpdated.ID, &linodego.ListOptions{}) + firewallsNew, err := lb.client.ListNodeBalancerFirewalls(t.Context(), nbUpdated.ID, &linodego.ListOptions{}) if err != nil { t.Fatalf("failed to List Firewalls %s", err) } @@ -1662,7 +2041,7 @@ func testUpdateLoadBalancerUpdateFirewallRemoveIDaddACL(t *testing.T, client *li t.Fatalf("No attached firewalls found") } - if firewallsNew[0].Rules.InboundPolicy != "DROP" { + if firewallsNew[0].Rules.InboundPolicy != drop { t.Errorf("expected DROP inbound policy, got %s", firewallsNew[0].Rules.InboundPolicy) } @@ -1677,6 +2056,8 @@ func testUpdateLoadBalancerUpdateFirewallRemoveIDaddACL(t *testing.T, client *li } func testUpdateLoadBalancerUpdateFirewallACL(t *testing.T, client *linodego.Client, fakeAPI *fakeAPI) { + t.Helper() + svc := &v1.Service{ ObjectMeta: metav1.ObjectMeta{ Name: randString(), @@ -1714,26 +2095,29 @@ func testUpdateLoadBalancerUpdateFirewallACL(t *testing.T, client *linodego.Clie }, } - lb := newLoadbalancers(client, "us-west").(*loadbalancers) + lb, assertion := newLoadbalancers(client, "us-west").(*loadbalancers) + if !assertion { + t.Error("type assertion failed") + } fakeClientset := fake.NewSimpleClientset() lb.kubeClient = fakeClientset defer func() { - _ = lb.EnsureLoadBalancerDeleted(context.TODO(), "linodelb", svc) + _ = lb.EnsureLoadBalancerDeleted(t.Context(), "linodelb", svc) }() - lbStatus, err := lb.EnsureLoadBalancer(context.TODO(), "linodelb", svc, nodes) + lbStatus, err := lb.EnsureLoadBalancer(t.Context(), "linodelb", svc, nodes) if err != nil { t.Errorf("EnsureLoadBalancer returned an error: %s", err) } svc.Status.LoadBalancer = *lbStatus stubService(fakeClientset, svc) - nb, err := lb.getNodeBalancerByStatus(context.TODO(), svc) + nb, err := lb.getNodeBalancerByStatus(t.Context(), svc) if err != nil { t.Fatalf("failed to get NodeBalancer via status: %s", err) } - firewalls, err := lb.client.ListNodeBalancerFirewalls(context.TODO(), nb.ID, &linodego.ListOptions{}) + firewalls, err := lb.client.ListNodeBalancerFirewalls(t.Context(), nb.ID, &linodego.ListOptions{}) if err != nil { t.Fatalf("Failed to list nodeBalancer firewalls %s", err) } @@ -1742,7 +2126,7 @@ func testUpdateLoadBalancerUpdateFirewallACL(t *testing.T, client *linodego.Clie t.Fatalf("No firewalls attached") } - if firewalls[0].Rules.InboundPolicy != "DROP" { + if firewalls[0].Rules.InboundPolicy != drop { t.Errorf("expected DROP inbound policy, got %s", firewalls[0].Rules.InboundPolicy) } @@ -1751,8 +2135,6 @@ func testUpdateLoadBalancerUpdateFirewallACL(t *testing.T, client *linodego.Clie t.Errorf("expected ips, got %v", fwIPs) } - fmt.Printf("got %v", fwIPs) - // Add ipv6 ips in allowList svc.ObjectMeta.SetAnnotations(map[string]string{ annotations.AnnLinodeCloudFirewallACL: `{ @@ -1763,17 +2145,17 @@ func testUpdateLoadBalancerUpdateFirewallACL(t *testing.T, client *linodego.Clie }`, }) - err = lb.UpdateLoadBalancer(context.TODO(), "linodelb", svc, nodes) + err = lb.UpdateLoadBalancer(t.Context(), "linodelb", svc, nodes) if err != nil { t.Errorf("UpdateLoadBalancer returned an error: %s", err) } - nbUpdated, err := lb.getNodeBalancerByStatus(context.TODO(), svc) + nbUpdated, err := lb.getNodeBalancerByStatus(t.Context(), svc) if err != nil { t.Fatalf("failed to get NodeBalancer 
via status: %s", err) } - firewallsNew, err := lb.client.ListNodeBalancerFirewalls(context.TODO(), nbUpdated.ID, &linodego.ListOptions{}) + firewallsNew, err := lb.client.ListNodeBalancerFirewalls(t.Context(), nbUpdated.ID, &linodego.ListOptions{}) if err != nil { t.Fatalf("failed to List Firewalls %s", err) } @@ -1809,17 +2191,17 @@ func testUpdateLoadBalancerUpdateFirewallACL(t *testing.T, client *linodego.Clie }`, }) - err = lb.UpdateLoadBalancer(context.TODO(), "linodelb", svc, nodes) + err = lb.UpdateLoadBalancer(t.Context(), "linodelb", svc, nodes) if err != nil { t.Errorf("UpdateLoadBalancer returned an error: %s", err) } - nbUpdated, err = lb.getNodeBalancerByStatus(context.TODO(), svc) + nbUpdated, err = lb.getNodeBalancerByStatus(t.Context(), svc) if err != nil { t.Fatalf("failed to get NodeBalancer via status: %s", err) } - firewallsNew, err = lb.client.ListNodeBalancerFirewalls(context.TODO(), nbUpdated.ID, &linodego.ListOptions{}) + firewallsNew, err = lb.client.ListNodeBalancerFirewalls(t.Context(), nbUpdated.ID, &linodego.ListOptions{}) if err != nil { t.Fatalf("failed to List Firewalls %s", err) } @@ -1855,17 +2237,17 @@ func testUpdateLoadBalancerUpdateFirewallACL(t *testing.T, client *linodego.Clie }`, }) - err = lb.UpdateLoadBalancer(context.TODO(), "linodelb", svc, nodes) + err = lb.UpdateLoadBalancer(t.Context(), "linodelb", svc, nodes) if err != nil { t.Errorf("UpdateLoadBalancer returned an error: %s", err) } - nbUpdated, err = lb.getNodeBalancerByStatus(context.TODO(), svc) + nbUpdated, err = lb.getNodeBalancerByStatus(t.Context(), svc) if err != nil { t.Fatalf("failed to get NodeBalancer via status: %s", err) } - firewallsNew, err = lb.client.ListNodeBalancerFirewalls(context.TODO(), nbUpdated.ID, &linodego.ListOptions{}) + firewallsNew, err = lb.client.ListNodeBalancerFirewalls(t.Context(), nbUpdated.ID, &linodego.ListOptions{}) if err != nil { t.Fatalf("failed to List Firewalls %s", err) } @@ -1892,13 +2274,15 @@ func testUpdateLoadBalancerUpdateFirewallACL(t *testing.T, client *linodego.Clie } // Run update with same ACL - err = lb.UpdateLoadBalancer(context.TODO(), "linodelb", svc, nodes) + err = lb.UpdateLoadBalancer(t.Context(), "linodelb", svc, nodes) if err != nil { t.Errorf("UpdateLoadBalancer returned an error: %s", err) } } func testUpdateLoadBalancerUpdateFirewall(t *testing.T, client *linodego.Client, fakeAPI *fakeAPI) { + t.Helper() + firewallCreateOpts := linodego.FirewallCreateOptions{ Label: "test", Rules: linodego.FirewallRuleSet{Inbound: []linodego.FirewallRule{{ @@ -1946,39 +2330,42 @@ func testUpdateLoadBalancerUpdateFirewall(t *testing.T, client *linodego.Client, }, } - lb := newLoadbalancers(client, "us-west").(*loadbalancers) + lb, assertion := newLoadbalancers(client, "us-west").(*loadbalancers) + if !assertion { + t.Error("type assertion failed") + } fakeClientset := fake.NewSimpleClientset() lb.kubeClient = fakeClientset defer func() { - _ = lb.EnsureLoadBalancerDeleted(context.TODO(), "linodelb", svc) + _ = lb.EnsureLoadBalancerDeleted(t.Context(), "linodelb", svc) }() fwClient := firewall.LinodeClient{Client: client} - fw, err := fwClient.CreateFirewall(context.TODO(), firewallCreateOpts) + fw, err := fwClient.CreateFirewall(t.Context(), firewallCreateOpts) if err != nil { t.Errorf("Error creating firewall %s", err) } defer func() { - _ = fwClient.DeleteFirewall(context.TODO(), fw) + _ = fwClient.DeleteFirewall(t.Context(), fw) }() svc.ObjectMeta.SetAnnotations(map[string]string{ annotations.AnnLinodeCloudFirewallID: 
strconv.Itoa(fw.ID), }) - lbStatus, err := lb.EnsureLoadBalancer(context.TODO(), "linodelb", svc, nodes) + lbStatus, err := lb.EnsureLoadBalancer(t.Context(), "linodelb", svc, nodes) if err != nil { t.Errorf("EnsureLoadBalancer returned an error: %s", err) } svc.Status.LoadBalancer = *lbStatus stubService(fakeClientset, svc) - nb, err := lb.getNodeBalancerByStatus(context.TODO(), svc) + nb, err := lb.getNodeBalancerByStatus(t.Context(), svc) if err != nil { t.Fatalf("failed to get NodeBalancer via status: %s", err) } - firewalls, err := lb.client.ListNodeBalancerFirewalls(context.TODO(), nb.ID, &linodego.ListOptions{}) + firewalls, err := lb.client.ListNodeBalancerFirewalls(t.Context(), nb.ID, &linodego.ListOptions{}) if err != nil { t.Fatalf("Failed to list nodeBalancer firewalls %s", err) } @@ -1992,29 +2379,29 @@ func testUpdateLoadBalancerUpdateFirewall(t *testing.T, client *linodego.Client, } firewallCreateOpts.Label = "test2" - firewallNew, err := fwClient.CreateFirewall(context.TODO(), firewallCreateOpts) + firewallNew, err := fwClient.CreateFirewall(t.Context(), firewallCreateOpts) if err != nil { t.Fatalf("Error in creating firewall %s", err) } defer func() { - _ = fwClient.DeleteFirewall(context.TODO(), firewallNew) + _ = fwClient.DeleteFirewall(t.Context(), firewallNew) }() svc.ObjectMeta.SetAnnotations(map[string]string{ annotations.AnnLinodeCloudFirewallID: strconv.Itoa(firewallNew.ID), }) - err = lb.UpdateLoadBalancer(context.TODO(), "linodelb", svc, nodes) + err = lb.UpdateLoadBalancer(t.Context(), "linodelb", svc, nodes) if err != nil { t.Errorf("UpdateLoadBalancer returned an error: %s", err) } - nbUpdated, err := lb.getNodeBalancerByStatus(context.TODO(), svc) + nbUpdated, err := lb.getNodeBalancerByStatus(t.Context(), svc) if err != nil { t.Fatalf("failed to get NodeBalancer via status: %s", err) } - firewallsNew, err := lb.client.ListNodeBalancerFirewalls(context.TODO(), nbUpdated.ID, &linodego.ListOptions{}) + firewallsNew, err := lb.client.ListNodeBalancerFirewalls(t.Context(), nbUpdated.ID, &linodego.ListOptions{}) if err != nil { t.Fatalf("failed to List Firewalls %s", err) } @@ -2029,6 +2416,8 @@ func testUpdateLoadBalancerUpdateFirewall(t *testing.T, client *linodego.Client, } func testUpdateLoadBalancerDeleteFirewallRemoveID(t *testing.T, client *linodego.Client, fakeAPI *fakeAPI) { + t.Helper() + firewallCreateOpts := linodego.FirewallCreateOptions{ Label: "test", Rules: linodego.FirewallRuleSet{Inbound: []linodego.FirewallRule{{ @@ -2073,40 +2462,43 @@ func testUpdateLoadBalancerDeleteFirewallRemoveID(t *testing.T, client *linodego }, } - lb := newLoadbalancers(client, "us-west").(*loadbalancers) + lb, assertion := newLoadbalancers(client, "us-west").(*loadbalancers) + if !assertion { + t.Error("type assertion failed") + } fakeClientset := fake.NewSimpleClientset() lb.kubeClient = fakeClientset defer func() { - _ = lb.EnsureLoadBalancerDeleted(context.TODO(), "linodelb", svc) + _ = lb.EnsureLoadBalancerDeleted(t.Context(), "linodelb", svc) }() fwClient := firewall.LinodeClient{Client: client} - fw, err := fwClient.CreateFirewall(context.TODO(), firewallCreateOpts) + fw, err := fwClient.CreateFirewall(t.Context(), firewallCreateOpts) if err != nil { t.Errorf("Error in creating firewall %s", err) } defer func() { - _ = fwClient.DeleteFirewall(context.TODO(), fw) + _ = fwClient.DeleteFirewall(t.Context(), fw) }() svc.ObjectMeta.SetAnnotations(map[string]string{ annotations.AnnLinodeCloudFirewallID: strconv.Itoa(fw.ID), }) - lbStatus, err := 
lb.EnsureLoadBalancer(context.TODO(), "linodelb", svc, nodes) + lbStatus, err := lb.EnsureLoadBalancer(t.Context(), "linodelb", svc, nodes) if err != nil { t.Errorf("EnsureLoadBalancer returned an error: %s", err) } svc.Status.LoadBalancer = *lbStatus stubService(fakeClientset, svc) - nb, err := lb.getNodeBalancerByStatus(context.TODO(), svc) + nb, err := lb.getNodeBalancerByStatus(t.Context(), svc) if err != nil { t.Fatalf("failed to get NodeBalancer via status: %s", err) } - firewalls, err := lb.client.ListNodeBalancerFirewalls(context.TODO(), nb.ID, &linodego.ListOptions{}) + firewalls, err := lb.client.ListNodeBalancerFirewalls(t.Context(), nb.ID, &linodego.ListOptions{}) if err != nil { t.Errorf("Error in listing firewalls %s", err) } @@ -2121,12 +2513,12 @@ func testUpdateLoadBalancerDeleteFirewallRemoveID(t *testing.T, client *linodego svc.ObjectMeta.SetAnnotations(map[string]string{}) - err = lb.UpdateLoadBalancer(context.TODO(), "linodelb", svc, nodes) + err = lb.UpdateLoadBalancer(t.Context(), "linodelb", svc, nodes) if err != nil { t.Errorf("UpdateLoadBalancer returned an error: %s", err) } - firewallsNew, err := lb.client.ListNodeBalancerFirewalls(context.TODO(), nb.ID, &linodego.ListOptions{}) + firewallsNew, err := lb.client.ListNodeBalancerFirewalls(t.Context(), nb.ID, &linodego.ListOptions{}) if err != nil { t.Fatalf("failed to List Firewalls %s", err) } @@ -2137,6 +2529,8 @@ func testUpdateLoadBalancerDeleteFirewallRemoveID(t *testing.T, client *linodego } func testUpdateLoadBalancerAddNodeBalancerID(t *testing.T, client *linodego.Client, fakeAPI *fakeAPI) { + t.Helper() + svc := &v1.Service{ ObjectMeta: metav1.ObjectMeta{ Name: randString(), @@ -2168,15 +2562,18 @@ func testUpdateLoadBalancerAddNodeBalancerID(t *testing.T, client *linodego.Clie }, } - lb := newLoadbalancers(client, "us-west").(*loadbalancers) + lb, assertion := newLoadbalancers(client, "us-west").(*loadbalancers) + if !assertion { + t.Error("type assertion failed") + } defer func() { - _ = lb.EnsureLoadBalancerDeleted(context.TODO(), "linodelb", svc) + _ = lb.EnsureLoadBalancerDeleted(t.Context(), "linodelb", svc) }() fakeClientset := fake.NewSimpleClientset() lb.kubeClient = fakeClientset - nodeBalancer, err := client.CreateNodeBalancer(context.TODO(), linodego.NodeBalancerCreateOptions{ + nodeBalancer, err := client.CreateNodeBalancer(t.Context(), linodego.NodeBalancerCreateOptions{ Region: lb.zone, }) if err != nil { @@ -2185,7 +2582,7 @@ func testUpdateLoadBalancerAddNodeBalancerID(t *testing.T, client *linodego.Clie svc.Status.LoadBalancer = *makeLoadBalancerStatus(svc, nodeBalancer) - newNodeBalancer, err := client.CreateNodeBalancer(context.TODO(), linodego.NodeBalancerCreateOptions{ + newNodeBalancer, err := client.CreateNodeBalancer(t.Context(), linodego.NodeBalancerCreateOptions{ Region: lb.zone, }) if err != nil { @@ -2196,13 +2593,13 @@ func testUpdateLoadBalancerAddNodeBalancerID(t *testing.T, client *linodego.Clie svc.ObjectMeta.SetAnnotations(map[string]string{ annotations.AnnLinodeNodeBalancerID: strconv.Itoa(newNodeBalancer.ID), }) - err = lb.UpdateLoadBalancer(context.TODO(), "linodelb", svc, nodes) + err = lb.UpdateLoadBalancer(t.Context(), "linodelb", svc, nodes) if err != nil { t.Errorf("UpdateLoadBalancer returned an error while updated annotations: %s", err) } clusterName := strings.TrimPrefix(svc.Namespace, "kube-system-") - lbStatus, _, err := lb.GetLoadBalancer(context.TODO(), clusterName, svc) + lbStatus, _, err := lb.GetLoadBalancer(t.Context(), clusterName, svc) if err != nil 
{ t.Errorf("GetLoadBalancer returned an error: %s", err) } @@ -2462,12 +2859,12 @@ func Test_getPortConfig(t *testing.T) { for _, test := range testcases { t.Run(test.name, func(t *testing.T) { testPort := 443 - portConfig, err := getPortConfig(test.service, testPort) + portConfigResult, err := getPortConfig(test.service, testPort) - if !reflect.DeepEqual(portConfig, test.expectedPortConfig) { + if !reflect.DeepEqual(portConfigResult, test.expectedPortConfig) { t.Error("unexpected port config") t.Logf("expected: %q", test.expectedPortConfig) - t.Logf("actual: %q", portConfig) + t.Logf("actual: %q", portConfigResult) } if !reflect.DeepEqual(err, test.err) { @@ -2548,9 +2945,10 @@ func Test_getHealthCheckType(t *testing.T) { func Test_getNodePrivateIP(t *testing.T) { testcases := []struct { - name string - node *v1.Node - address string + name string + node *v1.Node + address string + subnetID int }{ { "node internal ip specified", @@ -2565,6 +2963,7 @@ func Test_getNodePrivateIP(t *testing.T) { }, }, "127.0.0.1", + 0, }, { "node internal ip not specified", @@ -2579,6 +2978,7 @@ func Test_getNodePrivateIP(t *testing.T) { }, }, "", + 0, }, { "node internal ip annotation present", @@ -2598,12 +2998,33 @@ func Test_getNodePrivateIP(t *testing.T) { }, }, "192.168.42.42", + 0, + }, + { + "node internal ip annotation present and subnet id is not zero", + &v1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + annotations.AnnLinodeNodePrivateIP: "192.168.42.42", + }, + }, + Status: v1.NodeStatus{ + Addresses: []v1.NodeAddress{ + { + Type: v1.NodeInternalIP, + Address: "10.0.1.1", + }, + }, + }, + }, + "10.0.1.1", + 100, }, } for _, test := range testcases { t.Run(test.name, func(t *testing.T) { - ip := getNodePrivateIP(test.node) + ip := getNodePrivateIP(test.node, test.subnetID) if ip != test.address { t.Error("unexpected certificate") t.Logf("expected: %q", test.address) @@ -2614,6 +3035,8 @@ func Test_getNodePrivateIP(t *testing.T) { } func testBuildLoadBalancerRequest(t *testing.T, client *linodego.Client, _ *fakeAPI) { + t.Helper() + svc := &v1.Service{ ObjectMeta: metav1.ObjectMeta{ Name: "test", @@ -2651,8 +3074,11 @@ func testBuildLoadBalancerRequest(t *testing.T, client *linodego.Client, _ *fake }, } - lb := newLoadbalancers(client, "us-west").(*loadbalancers) - nb, err := lb.buildLoadBalancerRequest(context.TODO(), "linodelb", svc, nodes) + lb, assertion := newLoadbalancers(client, "us-west").(*loadbalancers) + if !assertion { + t.Error("type assertion failed") + } + nb, err := lb.buildLoadBalancerRequest(t.Context(), "linodelb", svc, nodes) if err != nil { t.Fatal(err) } @@ -2663,7 +3089,7 @@ func testBuildLoadBalancerRequest(t *testing.T, client *linodego.Client, _ *fake t.Logf("actual: %v", err) } - configs, err := client.ListNodeBalancerConfigs(context.TODO(), nb.ID, nil) + configs, err := client.ListNodeBalancerConfigs(t.Context(), nb.ID, nil) if err != nil { t.Fatal(err) } @@ -2674,7 +3100,7 @@ func testBuildLoadBalancerRequest(t *testing.T, client *linodego.Client, _ *fake t.Logf("actual: %v", len(configs)) } - nbNodes, err := client.ListNodeBalancerNodes(context.TODO(), nb.ID, configs[0].ID, nil) + nbNodes, err := client.ListNodeBalancerNodes(t.Context(), nb.ID, configs[0].ID, nil) if err != nil { t.Fatal(err) } @@ -2687,6 +3113,8 @@ func testBuildLoadBalancerRequest(t *testing.T, client *linodego.Client, _ *fake } func testEnsureLoadBalancerPreserveAnnotation(t *testing.T, client *linodego.Client, fake *fakeAPI) { + t.Helper() + testServiceSpec := 
v1.ServiceSpec{ Ports: []v1.ServicePort{ { @@ -2698,7 +3126,10 @@ func testEnsureLoadBalancerPreserveAnnotation(t *testing.T, client *linodego.Cli }, } - lb := newLoadbalancers(client, "us-west").(*loadbalancers) + lb, assertion := newLoadbalancers(client, "us-west").(*loadbalancers) + if !assertion { + t.Error("type assertion failed") + } for _, test := range []struct { name string deleted bool @@ -2730,13 +3161,13 @@ func testEnsureLoadBalancerPreserveAnnotation(t *testing.T, client *linodego.Cli Spec: testServiceSpec, } - nb, err := lb.createNodeBalancer(context.TODO(), "linodelb", svc, []*linodego.NodeBalancerConfigCreateOptions{}) + nb, err := lb.createNodeBalancer(t.Context(), "linodelb", svc, []*linodego.NodeBalancerConfigCreateOptions{}) if err != nil { t.Fatal(err) } svc.Status.LoadBalancer = *makeLoadBalancerStatus(svc, nb) - err = lb.EnsureLoadBalancerDeleted(context.TODO(), "linodelb", svc) + err = lb.EnsureLoadBalancerDeleted(t.Context(), "linodelb", svc) didDelete := fake.didRequestOccur(http.MethodDelete, fmt.Sprintf("/nodebalancers/%d", nb.ID), "") if didDelete && !test.deleted { @@ -2753,6 +3184,8 @@ func testEnsureLoadBalancerPreserveAnnotation(t *testing.T, client *linodego.Cli } func testEnsureLoadBalancerDeleted(t *testing.T, client *linodego.Client, fake *fakeAPI) { + t.Helper() + svc := &v1.Service{ ObjectMeta: metav1.ObjectMeta{ Name: "test", @@ -2810,17 +3243,20 @@ func testEnsureLoadBalancerDeleted(t *testing.T, client *linodego.Client, fake * }, } - lb := newLoadbalancers(client, "us-west").(*loadbalancers) + lb, assertion := newLoadbalancers(client, "us-west").(*loadbalancers) + if !assertion { + t.Error("type assertion failed") + } configs := []*linodego.NodeBalancerConfigCreateOptions{} - _, err := lb.createNodeBalancer(context.TODO(), "linodelb", svc, configs) + _, err := lb.createNodeBalancer(t.Context(), "linodelb", svc, configs) if err != nil { t.Fatal(err) } - defer func() { _ = lb.EnsureLoadBalancerDeleted(context.TODO(), "linodelb", svc) }() + defer func() { _ = lb.EnsureLoadBalancerDeleted(t.Context(), "linodelb", svc) }() for _, test := range testcases { t.Run(test.name, func(t *testing.T) { - err := lb.EnsureLoadBalancerDeleted(context.TODO(), test.clusterName, test.service) + err := lb.EnsureLoadBalancerDeleted(t.Context(), test.clusterName, test.service) if !reflect.DeepEqual(err, test.err) { t.Error("unexpected error") t.Logf("expected: %v", test.err) @@ -2831,6 +3267,8 @@ func testEnsureLoadBalancerDeleted(t *testing.T, client *linodego.Client, fake * } func testEnsureExistingLoadBalancer(t *testing.T, client *linodego.Client, _ *fakeAPI) { + t.Helper() + svc := &v1.Service{ ObjectMeta: metav1.ObjectMeta{ Name: "testensure", @@ -2858,19 +3296,22 @@ func testEnsureExistingLoadBalancer(t *testing.T, client *linodego.Client, _ *fa }, } - lb := newLoadbalancers(client, "us-west").(*loadbalancers) + lb, assertion := newLoadbalancers(client, "us-west").(*loadbalancers) + if !assertion { + t.Error("type assertion failed") + } lb.kubeClient = fake.NewSimpleClientset() addTLSSecret(t, lb.kubeClient) configs := []*linodego.NodeBalancerConfigCreateOptions{} - nb, err := lb.createNodeBalancer(context.TODO(), "linodelb", svc, configs) + nb, err := lb.createNodeBalancer(t.Context(), "linodelb", svc, configs) if err != nil { t.Fatal(err) } svc.Status.LoadBalancer = *makeLoadBalancerStatus(svc, nb) - defer func() { _ = lb.EnsureLoadBalancerDeleted(context.TODO(), "linodelb", svc) }() - getLBStatus, exists, err := lb.GetLoadBalancer(context.TODO(), "linodelb", 
svc) + defer func() { _ = lb.EnsureLoadBalancerDeleted(t.Context(), "linodelb", svc) }() + getLBStatus, exists, err := lb.GetLoadBalancer(t.Context(), "linodelb", svc) if err != nil { t.Fatalf("failed to create nodebalancer: %s", err) } @@ -2938,7 +3379,7 @@ func testEnsureExistingLoadBalancer(t *testing.T, client *linodego.Client, _ *fa for _, test := range testcases { t.Run(test.name, func(t *testing.T) { - lbStatus, err := lb.EnsureLoadBalancer(context.TODO(), test.clusterName, test.service, test.nodes) + lbStatus, err := lb.EnsureLoadBalancer(t.Context(), test.clusterName, test.service, test.nodes) if err != nil { t.Fatal(err) } @@ -2957,6 +3398,8 @@ func testEnsureExistingLoadBalancer(t *testing.T, client *linodego.Client, _ *fa } func testMakeLoadBalancerStatus(t *testing.T, client *linodego.Client, _ *fakeAPI) { + t.Helper() + ipv4 := "192.168.0.1" hostname := "nb-192-168-0-1.newark.nodebalancer.linode.com" nb := &linodego.NodeBalancer{ @@ -2990,7 +3433,76 @@ func testMakeLoadBalancerStatus(t *testing.T, client *linodego.Client, _ *fakeAP } } +func testMakeLoadBalancerStatusWithIPv6(t *testing.T, client *linodego.Client, _ *fakeAPI) { + t.Helper() + + ipv4 := "192.168.0.1" + ipv6 := "2600:3c00::f03c:91ff:fe24:3a2f" + hostname := "nb-192-168-0-1.newark.nodebalancer.linode.com" + nb := &linodego.NodeBalancer{ + IPv4: &ipv4, + IPv6: &ipv6, + Hostname: &hostname, + } + + svc := &v1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Annotations: make(map[string]string, 1), + }, + } + + // Test with EnableIPv6ForLoadBalancers = false (default) + Options.EnableIPv6ForLoadBalancers = false + expectedStatus := &v1.LoadBalancerStatus{ + Ingress: []v1.LoadBalancerIngress{{ + Hostname: hostname, + IP: ipv4, + }}, + } + status := makeLoadBalancerStatus(svc, nb) + if !reflect.DeepEqual(status, expectedStatus) { + t.Errorf("expected status with EnableIPv6ForLoadBalancers=false to be %#v; got %#v", expectedStatus, status) + } + + // Test with EnableIPv6ForLoadBalancers = true + Options.EnableIPv6ForLoadBalancers = true + expectedStatus = &v1.LoadBalancerStatus{ + Ingress: []v1.LoadBalancerIngress{ + { + Hostname: hostname, + IP: ipv4, + }, + { + Hostname: hostname, + IP: ipv6, + }, + }, + } + status = makeLoadBalancerStatus(svc, nb) + if !reflect.DeepEqual(status, expectedStatus) { + t.Errorf("expected status with EnableIPv6ForLoadBalancers=true to be %#v; got %#v", expectedStatus, status) + } + + // Test with per-service annotation + // Reset the global flag to false and set the annotation + Options.EnableIPv6ForLoadBalancers = false + svc.Annotations[annotations.AnnLinodeEnableIPv6Ingress] = "true" + + // Expect the same result as when the global flag is enabled + status = makeLoadBalancerStatus(svc, nb) + if !reflect.DeepEqual(status, expectedStatus) { + t.Errorf("expected status with %s=true annotation to be %#v; got %#v", + annotations.AnnLinodeEnableIPv6Ingress, expectedStatus, status) + } + + // Reset the flag to its default value + Options.EnableIPv6ForLoadBalancers = false +} + func testMakeLoadBalancerStatusEnvVar(t *testing.T, client *linodego.Client, _ *fakeAPI) { + t.Helper() + ipv4 := "192.168.0.1" hostname := "nb-192-168-0-1.newark.nodebalancer.linode.com" nb := &linodego.NodeBalancer{ @@ -3040,12 +3552,14 @@ func testMakeLoadBalancerStatusEnvVar(t *testing.T, client *linodego.Client, _ * } func testCleanupDoesntCall(t *testing.T, client *linodego.Client, fakeAPI *fakeAPI) { + t.Helper() + region := "us-west" - nb1, err := client.CreateNodeBalancer(context.TODO(), 
linodego.NodeBalancerCreateOptions{Region: region}) + nb1, err := client.CreateNodeBalancer(t.Context(), linodego.NodeBalancerCreateOptions{Region: region}) if err != nil { t.Fatal(err) } - nb2, err := client.CreateNodeBalancer(context.TODO(), linodego.NodeBalancerCreateOptions{Region: region}) + nb2, err := client.CreateNodeBalancer(t.Context(), linodego.NodeBalancerCreateOptions{Region: region}) if err != nil { t.Fatal(err) } @@ -3059,11 +3573,14 @@ func testCleanupDoesntCall(t *testing.T, client *linodego.Client, fakeAPI *fakeA } svc.Status.LoadBalancer = *makeLoadBalancerStatus(svc, nb1) svcAnn.Status.LoadBalancer = *makeLoadBalancerStatus(svcAnn, nb1) - lb := newLoadbalancers(client, region).(*loadbalancers) + lb, assertion := newLoadbalancers(client, region).(*loadbalancers) + if !assertion { + t.Error("type assertion failed") + } fakeAPI.ResetRequests() t.Run("non-annotated service shouldn't call the API during cleanup", func(t *testing.T) { - if err := lb.cleanupOldNodeBalancer(context.TODO(), svc); err != nil { + if err := lb.cleanupOldNodeBalancer(t.Context(), svc); err != nil { t.Fatal(err) } if len(fakeAPI.requests) != 0 { @@ -3073,7 +3590,7 @@ func testCleanupDoesntCall(t *testing.T, client *linodego.Client, fakeAPI *fakeA fakeAPI.ResetRequests() t.Run("annotated service calls the API to load said NB", func(t *testing.T) { - if err := lb.cleanupOldNodeBalancer(context.TODO(), svcAnn); err != nil { + if err := lb.cleanupOldNodeBalancer(t.Context(), svcAnn); err != nil { t.Fatal(err) } expectedRequests := map[fakeRequest]struct{}{ @@ -3088,6 +3605,8 @@ func testCleanupDoesntCall(t *testing.T, client *linodego.Client, fakeAPI *fakeA } func testUpdateLoadBalancerNoNodes(t *testing.T, client *linodego.Client, _ *fakeAPI) { + t.Helper() + svc := &v1.Service{ ObjectMeta: metav1.ObjectMeta{ Name: randString(), @@ -3106,15 +3625,18 @@ func testUpdateLoadBalancerNoNodes(t *testing.T, client *linodego.Client, _ *fak }, } - lb := newLoadbalancers(client, "us-west").(*loadbalancers) + lb, assertion := newLoadbalancers(client, "us-west").(*loadbalancers) + if !assertion { + t.Error("type assertion failed") + } defer func() { - _ = lb.EnsureLoadBalancerDeleted(context.TODO(), "linodelb", svc) + _ = lb.EnsureLoadBalancerDeleted(t.Context(), "linodelb", svc) }() fakeClientset := fake.NewSimpleClientset() lb.kubeClient = fakeClientset - nodeBalancer, err := client.CreateNodeBalancer(context.TODO(), linodego.NodeBalancerCreateOptions{ + nodeBalancer, err := client.CreateNodeBalancer(t.Context(), linodego.NodeBalancerCreateOptions{ Region: lb.zone, }) if err != nil { @@ -3129,17 +3651,22 @@ func testUpdateLoadBalancerNoNodes(t *testing.T, client *linodego.Client, _ *fak // setup done, test ensure/update nodes := []*v1.Node{} - if _, err = lb.EnsureLoadBalancer(context.TODO(), "linodelb", svc, nodes); !stderrors.Is(err, errNoNodesAvailable) { + if _, err = lb.EnsureLoadBalancer(t.Context(), "linodelb", svc, nodes); !stderrors.Is(err, errNoNodesAvailable) { t.Errorf("EnsureLoadBalancer should return %v, got %v", errNoNodesAvailable, err) } - if err := lb.UpdateLoadBalancer(context.TODO(), "linodelb", svc, nodes); !stderrors.Is(err, errNoNodesAvailable) { + if err := lb.UpdateLoadBalancer(t.Context(), "linodelb", svc, nodes); !stderrors.Is(err, errNoNodesAvailable) { t.Errorf("UpdateLoadBalancer should return %v, got %v", errNoNodesAvailable, err) } } func testGetNodeBalancerForServiceIDDoesNotExist(t *testing.T, client *linodego.Client, _ *fakeAPI) { - lb := newLoadbalancers(client, 
"us-west").(*loadbalancers) + t.Helper() + + lb, assertion := newLoadbalancers(client, "us-west").(*loadbalancers) + if !assertion { + t.Error("type assertion failed") + } bogusNodeBalancerID := "123456" svc := &v1.Service{ @@ -3162,7 +3689,7 @@ func testGetNodeBalancerForServiceIDDoesNotExist(t *testing.T, client *linodego. }, } - _, err := lb.getNodeBalancerForService(context.TODO(), svc) + _, err := lb.getNodeBalancerForService(t.Context(), svc) if err == nil { t.Fatal("expected getNodeBalancerForService to return an error") } @@ -3178,8 +3705,13 @@ func testGetNodeBalancerForServiceIDDoesNotExist(t *testing.T, client *linodego. } func testEnsureNewLoadBalancerWithNodeBalancerID(t *testing.T, client *linodego.Client, _ *fakeAPI) { - lb := newLoadbalancers(client, "us-west").(*loadbalancers) - nodeBalancer, err := client.CreateNodeBalancer(context.TODO(), linodego.NodeBalancerCreateOptions{ + t.Helper() + + lb, assertion := newLoadbalancers(client, "us-west").(*loadbalancers) + if !assertion { + t.Error("type assertion failed") + } + nodeBalancer, err := client.CreateNodeBalancer(t.Context(), linodego.NodeBalancerCreateOptions{ Region: lb.zone, }) if err != nil { @@ -3222,14 +3754,16 @@ func testEnsureNewLoadBalancerWithNodeBalancerID(t *testing.T, client *linodego. }, } - defer func() { _ = lb.EnsureLoadBalancerDeleted(context.TODO(), "linodelb", svc) }() + defer func() { _ = lb.EnsureLoadBalancerDeleted(t.Context(), "linodelb", svc) }() - if _, err = lb.EnsureLoadBalancer(context.TODO(), "linodelb", svc, nodes); err != nil { + if _, err = lb.EnsureLoadBalancer(t.Context(), "linodelb", svc, nodes); err != nil { t.Fatal(err) } } func testEnsureNewLoadBalancer(t *testing.T, client *linodego.Client, _ *fakeAPI) { + t.Helper() + svc := &v1.Service{ ObjectMeta: metav1.ObjectMeta{ Name: "testensure", @@ -3272,20 +3806,28 @@ func testEnsureNewLoadBalancer(t *testing.T, client *linodego.Client, _ *fakeAPI }, }, } - lb := newLoadbalancers(client, "us-west").(*loadbalancers) + lb, assertion := newLoadbalancers(client, "us-west").(*loadbalancers) + if !assertion { + t.Error("type assertion failed") + } lb.kubeClient = fake.NewSimpleClientset() addTLSSecret(t, lb.kubeClient) - defer func() { _ = lb.EnsureLoadBalancerDeleted(context.TODO(), "linodelb", svc) }() + defer func() { _ = lb.EnsureLoadBalancerDeleted(t.Context(), "linodelb", svc) }() - _, err := lb.EnsureLoadBalancer(context.TODO(), "linodelb", svc, nodes) + _, err := lb.EnsureLoadBalancer(t.Context(), "linodelb", svc, nodes) if err != nil { t.Fatal(err) } } func testGetLoadBalancer(t *testing.T, client *linodego.Client, _ *fakeAPI) { - lb := newLoadbalancers(client, "us-west").(*loadbalancers) + t.Helper() + + lb, assertion := newLoadbalancers(client, "us-west").(*loadbalancers) + if !assertion { + t.Error("type assertion failed") + } svc := &v1.Service{ ObjectMeta: metav1.ObjectMeta{ Name: "test", @@ -3307,11 +3849,11 @@ func testGetLoadBalancer(t *testing.T, client *linodego.Client, _ *fakeAPI) { } configs := []*linodego.NodeBalancerConfigCreateOptions{} - nb, err := lb.createNodeBalancer(context.TODO(), "linodelb", svc, configs) + nb, err := lb.createNodeBalancer(t.Context(), "linodelb", svc, configs) if err != nil { t.Fatal(err) } - defer func() { _ = lb.EnsureLoadBalancerDeleted(context.TODO(), "linodelb", svc) }() + defer func() { _ = lb.EnsureLoadBalancerDeleted(t.Context(), "linodelb", svc) }() lbStatus := makeLoadBalancerStatus(svc, nb) svc.Status.LoadBalancer = *lbStatus @@ -3360,7 +3902,7 @@ func testGetLoadBalancer(t 
*testing.T, client *linodego.Client, _ *fakeAPI) { for _, test := range testcases { t.Run(test.name, func(t *testing.T) { - _, found, err := lb.GetLoadBalancer(context.TODO(), test.clusterName, test.service) + _, found, err := lb.GetLoadBalancer(t.Context(), test.clusterName, test.service) if found != test.found { t.Error("unexpected error") t.Logf("expected: %v", test.found) @@ -3511,7 +4053,7 @@ func Test_getTLSCertInfo(t *testing.T) { for _, test := range testcases { t.Run(test.name, func(t *testing.T) { - cert, key, err := getTLSCertInfo(context.TODO(), kubeClient, "", test.portConfig) + cert, key, err := getTLSCertInfo(t.Context(), kubeClient, "", test.portConfig) if cert != test.cert { t.Error("unexpected error") t.Logf("expected: %v", test.cert) @@ -3532,7 +4074,9 @@ func Test_getTLSCertInfo(t *testing.T) { } func addTLSSecret(t *testing.T, kubeClient kubernetes.Interface) { - _, err := kubeClient.CoreV1().Secrets("").Create(context.TODO(), &v1.Secret{ + t.Helper() + + _, err := kubeClient.CoreV1().Secrets("").Create(t.Context(), &v1.Secret{ ObjectMeta: metav1.ObjectMeta{ Name: "tls-secret", }, @@ -3588,3 +4132,137 @@ func Test_LoadbalNodeNameCoercion(t *testing.T) { } } } + +func Test_loadbalancers_GetLinodeNBType(t *testing.T) { + type fields struct { + client client.Client + zone string + kubeClient kubernetes.Interface + ciliumClient ciliumclient.CiliumV2alpha1Interface + loadBalancerType string + } + type args struct { + service *v1.Service + } + tests := []struct { + name string + fields fields + args args + defaultNB linodego.NodeBalancerPlanType + want linodego.NodeBalancerPlanType + }{ + { + name: "No annotation in service and common as default", + fields: fields{ + client: nil, + zone: "", + kubeClient: nil, + ciliumClient: nil, + loadBalancerType: "nodebalancer", + }, + args: args{ + service: &v1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Annotations: map[string]string{}, + }, + }, + }, + defaultNB: linodego.NBTypeCommon, + want: linodego.NBTypeCommon, + }, + { + name: "No annotation in service and premium as default", + fields: fields{ + client: nil, + zone: "", + kubeClient: nil, + ciliumClient: nil, + loadBalancerType: "nodebalancer", + }, + args: args{ + service: &v1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Annotations: map[string]string{}, + }, + }, + }, + defaultNB: linodego.NBTypePremium, + want: linodego.NBTypePremium, + }, + { + name: "Nodebalancer type annotation in service", + fields: fields{ + client: nil, + zone: "", + kubeClient: nil, + ciliumClient: nil, + loadBalancerType: "nodebalancer", + }, + args: args{ + service: &v1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test", + Annotations: map[string]string{ + annotations.AnnLinodeNodeBalancerType: string(linodego.NBTypePremium), + }, + }, + }, + }, + defaultNB: linodego.NBTypeCommon, + want: linodego.NBTypePremium, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + l := &loadbalancers{ + client: tt.fields.client, + zone: tt.fields.zone, + kubeClient: tt.fields.kubeClient, + ciliumClient: tt.fields.ciliumClient, + loadBalancerType: tt.fields.loadBalancerType, + } + Options.DefaultNBType = string(tt.defaultNB) + if got := l.GetLinodeNBType(tt.args.service); !reflect.DeepEqual(got, tt.want) { + t.Errorf("loadbalancers.GetLinodeNBType() = %v, want %v", got, tt.want) + } + }) + } +} + +func Test_validateNodeBalancerBackendIPv4Range(t *testing.T) { + type args struct { + backendIPv4Range string + } + tests := []struct { + name string + args args 
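+ // wantErr marks the cases where validateNodeBalancerBackendIPv4Range is expected to reject the range.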
+ wantErr bool + }{ + { + name: "Valid IPv4 range", + args: args{backendIPv4Range: "10.100.0.0/30"}, + wantErr: false, + }, + { + name: "Invalid IPv4 range", + args: args{backendIPv4Range: "10.100.0.0"}, + wantErr: true, + }, + } + + nbBackendSubnet := Options.NodeBalancerBackendIPv4Subnet + defer func() { + Options.NodeBalancerBackendIPv4Subnet = nbBackendSubnet + }() + Options.NodeBalancerBackendIPv4Subnet = "10.100.0.0/24" + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if err := validateNodeBalancerBackendIPv4Range(tt.args.backendIPv4Range); (err != nil) != tt.wantErr { + t.Errorf("validateNodeBalancerBackendIPv4Range() error = %v, wantErr %v", err, tt.wantErr) + } + }) + } +} diff --git a/cloud/linode/metrics.go b/cloud/linode/metrics.go index a447dfdb..7973b7a1 100644 --- a/cloud/linode/metrics.go +++ b/cloud/linode/metrics.go @@ -3,9 +3,9 @@ package linode import ( "sync" - "github.com/linode/linode-cloud-controller-manager/cloud/linode/client" - "k8s.io/component-base/metrics/legacyregistry" + + "github.com/linode/linode-cloud-controller-manager/cloud/linode/client" ) var registerOnce sync.Once diff --git a/cloud/linode/node_controller.go b/cloud/linode/node_controller.go index 365e4da0..b6f4b2a1 100644 --- a/cloud/linode/node_controller.go +++ b/cloud/linode/node_controller.go @@ -24,10 +24,18 @@ import ( ) const ( - informerResyncPeriod = 1 * time.Minute - defaultMetadataTTL = 300 * time.Second + informerResyncPeriod = 1 * time.Minute + defaultMetadataTTL = 300 * time.Second + defaultK8sNodeCacheTTL = 300 * time.Second ) +var registeredK8sNodeCache *k8sNodeCache = newK8sNodeCache() + +type nodeRequest struct { + node *v1.Node + timestamp time.Time +} + type nodeController struct { sync.RWMutex @@ -39,13 +47,112 @@ type nodeController struct { metadataLastUpdate map[string]time.Time ttl time.Duration - queue workqueue.TypedDelayingInterface[any] + queue workqueue.TypedDelayingInterface[nodeRequest] + nodeLastAdded map[string]time.Time +} + +// k8sNodeCache stores node related info as registered in k8s +type k8sNodeCache struct { + sync.RWMutex + nodes map[string]*v1.Node + providerIDs map[string]string + lastUpdate time.Time + ttl time.Duration +} + +// updateCache updates the k8s node cache with the latest nodes from the k8s API server. 
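+// The refresh is skipped while the previous snapshot is younger than the cache TTL, so callers
+// can invoke it after every processed queue item without flooding the API server with List calls.
+// Nodes whose Spec.ProviderID is still empty are left out of both maps; they are picked up on a
+// later refresh, or via addNodeToCache once handleNode has populated the provider ID.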
+func (c *k8sNodeCache) updateCache(kubeclient kubernetes.Interface) { + c.Lock() + defer c.Unlock() + if time.Since(c.lastUpdate) < c.ttl { + return + } + + nodeList, err := kubeclient.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{}) + if err != nil { + klog.Errorf("failed to list nodes, cannot create/update k8s node cache: %s", err) + return + } + + nodes := make(map[string]*v1.Node, len(nodeList.Items)) + providerIDs := make(map[string]string, len(nodeList.Items)) + for _, node := range nodeList.Items { + if node.Spec.ProviderID == "" { + klog.Errorf("Empty providerID [%s] for node %s, skipping it", node.Spec.ProviderID, node.Name) + continue + } + nodes[node.Name] = &node + providerIDs[node.Spec.ProviderID] = node.Name + } + + c.nodes = nodes + c.providerIDs = providerIDs + c.lastUpdate = time.Now() +} + +// addNodeToCache stores the specified node in k8s node cache +func (c *k8sNodeCache) addNodeToCache(node *v1.Node) { + c.Lock() + defer c.Unlock() + if node.Spec.ProviderID == "" { + klog.Errorf("Empty providerID [%s] for node %s, skipping it", node.Spec.ProviderID, node.Name) + return + } + c.nodes[node.Name] = node + c.providerIDs[node.Spec.ProviderID] = node.Name +} + +// getNodeLabel returns the k8s node label for the given provider ID or instance label. +// If the provider ID or label is not found in the cache, it returns an empty string and false. +func (c *k8sNodeCache) getNodeLabel(providerID string, instanceLabel string) (string, bool) { + c.RLock() + defer c.RUnlock() + + // check if instance label matches with the registered k8s node + if _, exists := c.nodes[instanceLabel]; exists { + return instanceLabel, true + } + + // check if provider id matches with the registered k8s node + if label, exists := c.providerIDs[providerID]; exists { + return label, true + } + + return "", false +} + +// getProviderID returns linode specific providerID for given k8s node name +func (c *k8sNodeCache) getProviderID(nodeName string) (string, bool) { + c.RLock() + defer c.RUnlock() + + if node, exists := c.nodes[nodeName]; exists { + return node.Spec.ProviderID, true + } + + return "", false +} + +// newK8sNodeCache returns new k8s node cache instance +func newK8sNodeCache() *k8sNodeCache { + timeout := defaultK8sNodeCacheTTL + if raw, ok := os.LookupEnv("K8S_NODECACHE_TTL"); ok { + if t, err := strconv.Atoi(raw); t > 0 && err == nil { + timeout = time.Duration(t) * time.Second + } + } + + return &k8sNodeCache{ + nodes: make(map[string]*v1.Node, 0), + providerIDs: make(map[string]string, 0), + ttl: timeout, + } } func newNodeController(kubeclient kubernetes.Interface, client client.Client, informer v1informers.NodeInformer, instanceCache *instances) *nodeController { timeout := defaultMetadataTTL if raw, ok := os.LookupEnv("LINODE_METADATA_TTL"); ok { - if t, _ := strconv.Atoi(raw); t > 0 { + if t, err := strconv.Atoi(raw); t > 0 && err == nil { timeout = time.Duration(t) * time.Second } } @@ -57,7 +164,8 @@ func newNodeController(kubeclient kubernetes.Interface, client client.Client, in informer: informer, ttl: timeout, metadataLastUpdate: make(map[string]time.Time), - queue: workqueue.NewTypedDelayingQueueWithConfig[any](workqueue.TypedDelayingQueueConfig[any]{Name: "ccm_node"}), + queue: workqueue.NewTypedDelayingQueueWithConfig(workqueue.TypedDelayingQueueConfig[nodeRequest]{Name: "ccm_node"}), + nodeLastAdded: make(map[string]time.Time), } } @@ -71,7 +179,7 @@ func (s *nodeController) Run(stopCh <-chan struct{}) { } klog.Infof("NodeController will handle newly created node 
(%s) metadata", node.Name) - s.queue.Add(node) + s.addNodeToQueue(node) }, UpdateFunc: func(oldObj, newObj interface{}) { node, ok := newObj.(*v1.Node) @@ -80,7 +188,7 @@ func (s *nodeController) Run(stopCh <-chan struct{}) { } klog.Infof("NodeController will handle newly updated node (%s) metadata", node.Name) - s.queue.Add(node) + s.addNodeToQueue(node) }, }, informerResyncPeriod, @@ -92,6 +200,15 @@ func (s *nodeController) Run(stopCh <-chan struct{}) { s.informer.Informer().Run(stopCh) } +// addNodeToQueue adds a node to the queue for processing. +func (s *nodeController) addNodeToQueue(node *v1.Node) { + s.Lock() + defer s.Unlock() + currTime := time.Now() + s.nodeLastAdded[node.Name] = currTime + s.queue.Add(nodeRequest{node: node, timestamp: currTime}) +} + // worker runs a worker thread that dequeues new or modified nodes and processes // metadata (host UUID) on each of them. func (s *nodeController) worker() { @@ -100,32 +217,36 @@ func (s *nodeController) worker() { } func (s *nodeController) processNext() bool { - key, quit := s.queue.Get() + request, quit := s.queue.Get() if quit { return false } - defer s.queue.Done(key) + defer s.queue.Done(request) - node, ok := key.(*v1.Node) - if !ok { - klog.Errorf("expected dequeued key to be of type *v1.Node but got %T", node) + s.RLock() + latestTimestamp, exists := s.nodeLastAdded[request.node.Name] + s.RUnlock() + if !exists || request.timestamp.Before(latestTimestamp) { + klog.V(3).InfoS("Skipping node metadata update as its not the most recent object", "node", klog.KObj(request.node)) return true } - - err := s.handleNode(context.TODO(), node) + err := s.handleNode(context.TODO(), request.node) + //nolint: errorlint //switching to errors.Is()/errors.As() causes errors with Code field switch deleteErr := err.(type) { case nil: break case *linodego.Error: if deleteErr.Code >= http.StatusInternalServerError || deleteErr.Code == http.StatusTooManyRequests { - klog.Errorf("failed to add metadata for node (%s); retrying in 1 minute: %s", node.Name, err) - s.queue.AddAfter(node, retryInterval) + klog.Errorf("failed to add metadata for node (%s); retrying in 1 minute: %s", request.node.Name, err) + s.queue.AddAfter(request, retryInterval) } default: - klog.Errorf("failed to add metadata for node (%s); will not retry: %s", node.Name, err) + klog.Errorf("failed to add metadata for node (%s); will not retry: %s", request.node.Name, err) } + + registeredK8sNodeCache.updateCache(s.kubeclient) return true } @@ -168,9 +289,7 @@ func (s *nodeController) handleNode(ctx context.Context, node *v1.Node) error { expectedPrivateIP := "" // linode API response for linode will contain only one private ip - // if any private ip is configured. If it changes in future or linode - // supports other subnets with nodebalancer, this logic needs to be updated. - // https://www.linode.com/docs/api/linode-instances/#linode-view + // if any private ip is configured. 
for _, addr := range linode.IPv4 { if isPrivate(addr) { expectedPrivateIP = addr.String() @@ -183,34 +302,38 @@ func (s *nodeController) handleNode(ctx context.Context, node *v1.Node) error { return nil } + var updatedNode *v1.Node if err := retry.RetryOnConflict(retry.DefaultRetry, func() error { // Get a fresh copy of the node so the resource version is up-to-date - n, err := s.kubeclient.CoreV1().Nodes().Get(ctx, node.Name, metav1.GetOptions{}) + nodeResult, err := s.kubeclient.CoreV1().Nodes().Get(ctx, node.Name, metav1.GetOptions{}) if err != nil { return err } // Try to update the node UUID if it has not been set - if n.Labels[annotations.AnnLinodeHostUUID] != linode.HostUUID { - n.Labels[annotations.AnnLinodeHostUUID] = linode.HostUUID + if nodeResult.Labels[annotations.AnnLinodeHostUUID] != linode.HostUUID { + nodeResult.Labels[annotations.AnnLinodeHostUUID] = linode.HostUUID } // Try to update the node ProviderID if it has not been set - if n.Spec.ProviderID == "" { - n.Spec.ProviderID = providerIDPrefix + strconv.Itoa(linode.ID) + if nodeResult.Spec.ProviderID == "" { + nodeResult.Spec.ProviderID = providerIDPrefix + strconv.Itoa(linode.ID) } // Try to update the expectedPrivateIP if its not set or doesn't match - if n.Annotations[annotations.AnnLinodeNodePrivateIP] != expectedPrivateIP && expectedPrivateIP != "" { - n.Annotations[annotations.AnnLinodeNodePrivateIP] = expectedPrivateIP + if nodeResult.Annotations[annotations.AnnLinodeNodePrivateIP] != expectedPrivateIP && expectedPrivateIP != "" { + nodeResult.Annotations[annotations.AnnLinodeNodePrivateIP] = expectedPrivateIP } - _, err = s.kubeclient.CoreV1().Nodes().Update(ctx, n, metav1.UpdateOptions{}) + updatedNode, err = s.kubeclient.CoreV1().Nodes().Update(ctx, nodeResult, metav1.UpdateOptions{}) return err }); err != nil { klog.V(1).ErrorS(err, "Node update error") return err } + if updatedNode != nil { + registeredK8sNodeCache.addNodeToCache(updatedNode) + } s.SetLastMetadataUpdate(node.Name) return nil diff --git a/cloud/linode/node_controller_test.go b/cloud/linode/node_controller_test.go index 409bc24d..c7e00ab0 100644 --- a/cloud/linode/node_controller_test.go +++ b/cloud/linode/node_controller_test.go @@ -1,7 +1,6 @@ package linode import ( - "context" "errors" "net" "net/http" @@ -9,15 +8,17 @@ import ( "time" "github.com/golang/mock/gomock" - "github.com/linode/linode-cloud-controller-manager/cloud/annotations" - "github.com/linode/linode-cloud-controller-manager/cloud/linode/client/mocks" "github.com/linode/linodego" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/informers" "k8s.io/client-go/kubernetes/fake" "k8s.io/client-go/util/workqueue" + + "github.com/linode/linode-cloud-controller-manager/cloud/annotations" + "github.com/linode/linode-cloud-controller-manager/cloud/linode/client/mocks" ) func TestNodeController_Run(t *testing.T) { @@ -27,7 +28,7 @@ func TestNodeController_Run(t *testing.T) { client := mocks.NewMockClient(ctrl) kubeClient := fake.NewSimpleClientset() informer := informers.NewSharedInformerFactory(kubeClient, 0).Core().V1().Nodes() - mockQueue := workqueue.NewTypedDelayingQueueWithConfig(workqueue.TypedDelayingQueueConfig[any]{Name: "test"}) + mockQueue := workqueue.NewTypedDelayingQueueWithConfig(workqueue.TypedDelayingQueueConfig[nodeRequest]{Name: "test"}) nodeCtrl := newNodeController(kubeClient, client, informer, newInstances(client)) nodeCtrl.queue = mockQueue @@ -42,8 
+43,8 @@ func TestNodeController_Run(t *testing.T) { }, Spec: v1.NodeSpec{}, } - _, err := kubeClient.CoreV1().Nodes().Create(context.TODO(), node, metav1.CreateOptions{}) - assert.NoError(t, err, "expected no error during node creation") + _, err := kubeClient.CoreV1().Nodes().Create(t.Context(), node, metav1.CreateOptions{}) + require.NoError(t, err, "expected no error during node creation") // Start the controller stopCh := make(chan struct{}) @@ -52,7 +53,7 @@ func TestNodeController_Run(t *testing.T) { client.EXPECT().ListInstances(gomock.Any(), nil).AnyTimes().Return([]linodego.Instance{}, &linodego.Error{Code: http.StatusTooManyRequests, Message: "Too many requests"}) // Add the node to the informer err = nodeCtrl.informer.Informer().GetStore().Add(node) - assert.NoError(t, err, "expected no error when adding node to informer") + require.NoError(t, err, "expected no error when adding node to informer") // Allow some time for the queue to process time.Sleep(1 * time.Second) @@ -67,7 +68,7 @@ func TestNodeController_processNext(t *testing.T) { defer ctrl.Finish() client := mocks.NewMockClient(ctrl) kubeClient := fake.NewSimpleClientset() - queue := workqueue.NewTypedDelayingQueueWithConfig(workqueue.TypedDelayingQueueConfig[any]{Name: "testQueue"}) + queue := workqueue.NewTypedDelayingQueueWithConfig(workqueue.TypedDelayingQueueConfig[nodeRequest]{Name: "testQueue"}) node := &v1.Node{ ObjectMeta: metav1.ObjectMeta{ Name: "test", @@ -77,8 +78,8 @@ func TestNodeController_processNext(t *testing.T) { Spec: v1.NodeSpec{}, } - _, err := kubeClient.CoreV1().Nodes().Create(context.TODO(), node, metav1.CreateOptions{}) - assert.NoError(t, err, "expected no error during node creation") + _, err := kubeClient.CoreV1().Nodes().Create(t.Context(), node, metav1.CreateOptions{}) + require.NoError(t, err, "expected no error during node creation") controller := &nodeController{ kubeclient: kubeClient, @@ -86,10 +87,11 @@ func TestNodeController_processNext(t *testing.T) { queue: queue, metadataLastUpdate: make(map[string]time.Time), ttl: defaultMetadataTTL, + nodeLastAdded: make(map[string]time.Time), } t.Run("should return no error on unknown errors", func(t *testing.T) { - queue.Add(node) + controller.addNodeToQueue(node) client.EXPECT().ListInstances(gomock.Any(), nil).Times(1).Return([]linodego.Instance{}, errors.New("lookup failed")) result := controller.processNext() assert.True(t, result, "processNext should return true") @@ -98,8 +100,18 @@ func TestNodeController_processNext(t *testing.T) { } }) + t.Run("should return no error if timestamp for node being processed is older than the most recent request", func(t *testing.T) { + controller.addNodeToQueue(node) + controller.nodeLastAdded["test"] = time.Now().Add(controller.ttl) + result := controller.processNext() + assert.True(t, result, "processNext should return true") + if queue.Len() != 0 { + t.Errorf("expected queue to be empty, got %d items", queue.Len()) + } + }) + t.Run("should return no error if node exists", func(t *testing.T) { - queue.Add(node) + controller.addNodeToQueue(node) publicIP := net.ParseIP("172.234.31.123") privateIP := net.ParseIP("192.168.159.135") client.EXPECT().ListInstances(gomock.Any(), nil).Times(1).Return([]linodego.Instance{ @@ -112,8 +124,27 @@ func TestNodeController_processNext(t *testing.T) { } }) - t.Run("should return no error if queued object is not of type Node", func(t *testing.T) { - queue.Add("abc") + t.Run("should return no error if node has providerID set", func(t *testing.T) { + node2 := &v1.Node{ 
+ ObjectMeta: metav1.ObjectMeta{ + Name: "test-node2", + Labels: map[string]string{}, + Annotations: map[string]string{}, + }, + Spec: v1.NodeSpec{ProviderID: "linode://112"}, + } + currInstances := controller.instances + defer func() { + controller.instances = currInstances + }() + controller.instances = newInstances(client) + registeredK8sNodeCache.lastUpdate = time.Now().Add(-15 * time.Minute) + controller.addNodeToQueue(node2) + publicIP := net.ParseIP("172.234.31.123") + privateIP := net.ParseIP("192.168.159.135") + client.EXPECT().ListInstances(gomock.Any(), nil).Times(1).Return([]linodego.Instance{ + {ID: 112, Label: "test-node2", IPv4: []*net.IP{&publicIP, &privateIP}, HostUUID: "112"}, + }, nil) result := controller.processNext() assert.True(t, result, "processNext should return true") if queue.Len() != 0 { @@ -122,7 +153,7 @@ func TestNodeController_processNext(t *testing.T) { }) t.Run("should return no error if node in k8s doesn't exist", func(t *testing.T) { - queue.Add(node) + controller.addNodeToQueue(node) controller.kubeclient = fake.NewSimpleClientset() defer func() { controller.kubeclient = kubeClient }() result := controller.processNext() @@ -133,9 +164,9 @@ func TestNodeController_processNext(t *testing.T) { }) t.Run("should return error and requeue when it gets 429 from linode API", func(t *testing.T) { - queue = workqueue.NewTypedDelayingQueueWithConfig(workqueue.TypedDelayingQueueConfig[any]{Name: "testQueue1"}) - queue.Add(node) + queue = workqueue.NewTypedDelayingQueueWithConfig(workqueue.TypedDelayingQueueConfig[nodeRequest]{Name: "testQueue1"}) controller.queue = queue + controller.addNodeToQueue(node) client := mocks.NewMockClient(ctrl) controller.instances = newInstances(client) retryInterval = 1 * time.Nanosecond @@ -149,9 +180,9 @@ func TestNodeController_processNext(t *testing.T) { }) t.Run("should return error and requeue when it gets error >= 500 from linode API", func(t *testing.T) { - queue = workqueue.NewTypedDelayingQueueWithConfig(workqueue.TypedDelayingQueueConfig[any]{Name: "testQueue2"}) - queue.Add(node) + queue = workqueue.NewTypedDelayingQueueWithConfig(workqueue.TypedDelayingQueueConfig[nodeRequest]{Name: "testQueue2"}) controller.queue = queue + controller.addNodeToQueue(node) client := mocks.NewMockClient(ctrl) controller.instances = newInstances(client) retryInterval = 1 * time.Nanosecond @@ -179,8 +210,8 @@ func TestNodeController_handleNode(t *testing.T) { }, Spec: v1.NodeSpec{ProviderID: "linode://123"}, } - _, err := kubeClient.CoreV1().Nodes().Create(context.TODO(), node, metav1.CreateOptions{}) - assert.NoError(t, err, "expected no error during node creation") + _, err := kubeClient.CoreV1().Nodes().Create(t.Context(), node, metav1.CreateOptions{}) + require.NoError(t, err, "expected no error during node creation") instCache := newInstances(client) @@ -188,14 +219,18 @@ func TestNodeController_handleNode(t *testing.T) { nodeCtrl := newNodeController(kubeClient, client, nil, instCache) assert.Equal(t, 30*time.Second, nodeCtrl.ttl, "expected ttl to be 30 seconds") + t.Setenv("K8S_NODECACHE_TTL", "60") + currK8sNodeCache := newK8sNodeCache() + assert.Equal(t, 60*time.Second, currK8sNodeCache.ttl, "expected ttl to be 60 seconds") + // Test: Successful metadata update publicIP := net.ParseIP("172.234.31.123") privateIP := net.ParseIP("192.168.159.135") client.EXPECT().ListInstances(gomock.Any(), nil).Times(1).Return([]linodego.Instance{ {ID: 123, Label: "test-node", IPv4: []*net.IP{&publicIP, &privateIP}, HostUUID: "123"}, }, nil) - err = 
nodeCtrl.handleNode(context.TODO(), node) - assert.NoError(t, err, "expected no error during handleNode") + err = nodeCtrl.handleNode(t.Context(), node) + require.NoError(t, err, "expected no error during handleNode") // Check metadataLastUpdate lastUpdate := nodeCtrl.LastMetadataUpdate("test-node") @@ -206,16 +241,16 @@ func TestNodeController_handleNode(t *testing.T) { // Annotations set, no update needed as ttl not reached node.Labels[annotations.AnnLinodeHostUUID] = "123" node.Annotations[annotations.AnnLinodeNodePrivateIP] = privateIP.String() - err = nodeCtrl.handleNode(context.TODO(), node) - assert.NoError(t, err, "expected no error during handleNode") + err = nodeCtrl.handleNode(t.Context(), node) + require.NoError(t, err, "expected no error during handleNode") // Lookup failure for linode instance client = mocks.NewMockClient(ctrl) nodeCtrl.instances = newInstances(client) nodeCtrl.metadataLastUpdate["test-node"] = time.Now().Add(-2 * nodeCtrl.ttl) client.EXPECT().ListInstances(gomock.Any(), nil).Times(1).Return([]linodego.Instance{}, errors.New("lookup failed")) - err = nodeCtrl.handleNode(context.TODO(), node) - assert.Error(t, err, "expected error during handleNode, got nil") + err = nodeCtrl.handleNode(t.Context(), node) + require.Error(t, err, "expected error during handleNode, got nil") // All fields already set client = mocks.NewMockClient(ctrl) @@ -224,6 +259,42 @@ func TestNodeController_handleNode(t *testing.T) { client.EXPECT().ListInstances(gomock.Any(), nil).Times(1).Return([]linodego.Instance{ {ID: 123, Label: "test-node", IPv4: []*net.IP{&publicIP, &privateIP}, HostUUID: "123"}, }, nil) - err = nodeCtrl.handleNode(context.TODO(), node) + err = nodeCtrl.handleNode(t.Context(), node) assert.NoError(t, err, "expected no error during handleNode") } + +func Test_k8sNodeCache_addNodeToCache(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + + // Add node with providerID set + node := &v1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-node", + Labels: map[string]string{}, + Annotations: map[string]string{}, + }, + Spec: v1.NodeSpec{ProviderID: "linode://123"}, + } + + currK8sNodeCache := newK8sNodeCache() + currK8sNodeCache.addNodeToCache(node) + + if _, exists := currK8sNodeCache.nodes[node.Name]; !exists { + t.Errorf("expected node to be added to cache") + } + + // Add node without providerID set + node2 := &v1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-node2", + Labels: map[string]string{}, + Annotations: map[string]string{}, + }, + } + + currK8sNodeCache.addNodeToCache(node2) + if _, exists := currK8sNodeCache.nodes[node2.Name]; exists { + t.Errorf("expected node to not be added to cache") + } +} diff --git a/cloud/linode/route_controller.go b/cloud/linode/route_controller.go index b1aa112b..5bac62b8 100644 --- a/cloud/linode/route_controller.go +++ b/cloud/linode/route_controller.go @@ -65,7 +65,7 @@ type routes struct { func newRoutes(client client.Client, instanceCache *instances) (cloudprovider.Routes, error) { timeout := 60 if raw, ok := os.LookupEnv("LINODE_ROUTES_CACHE_TTL_SECONDS"); ok { - if t, _ := strconv.Atoi(raw); t > 0 { + if t, err := strconv.Atoi(raw); t > 0 && err == nil { timeout = t } } @@ -112,6 +112,11 @@ func (r *routes) getInstanceFromName(ctx context.Context, name string) (*linodeg }, } + // fetch providerID from k8s node cache if it exists + if id, ok := registeredK8sNodeCache.getProviderID(name); ok { + node.Spec.ProviderID = id + } + // fetch instance with specified node name instance, err := 
r.instances.lookupLinode(ctx, node) if err != nil { @@ -235,6 +240,15 @@ func (r *routes) ListRoutes(ctx context.Context, clusterName string) ([]*cloudpr var configuredRoutes []*cloudprovider.Route for _, instance := range instances { + providerID := providerIDPrefix + strconv.Itoa(instance.ID) + label, found := registeredK8sNodeCache.getNodeLabel(providerID, instance.Label) + if !found { + klog.V(4).Infof("Node %s not found in k8s node cache, skipping listing its routes", instance.Label) + continue + } + // Update label to match with k8s registered label + instance.Label = label + instanceRoutes, err := r.getInstanceRoutes(ctx, instance.ID) if err != nil { klog.Errorf("Failed finding routes for instance id %d. Error: %v", instance.ID, err) diff --git a/cloud/linode/route_controller_test.go b/cloud/linode/route_controller_test.go index e6f2bff0..84ecdaf3 100644 --- a/cloud/linode/route_controller_test.go +++ b/cloud/linode/route_controller_test.go @@ -1,13 +1,16 @@ package linode import ( - "context" "net" + "strconv" "testing" "github.com/golang/mock/gomock" "github.com/linode/linodego" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" cloudprovider "k8s.io/cloud-provider" "k8s.io/utils/ptr" @@ -23,24 +26,32 @@ func TestListRoutes(t *testing.T) { nodeID := 123 name := "mock-instance" + node := &v1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Labels: map[string]string{}, + Annotations: map[string]string{}, + }, + Spec: v1.NodeSpec{ProviderID: providerIDPrefix + strconv.Itoa(nodeID)}, + } publicIPv4 := net.ParseIP("45.76.101.25") privateIPv4 := net.ParseIP("192.168.133.65") linodeType := "g6-standard-1" region := "us-east" t.Run("should return empty if no instance exists in cluster", func(t *testing.T) { - ctx := context.Background() + ctx := t.Context() ctrl := gomock.NewController(t) defer ctrl.Finish() client := mocks.NewMockClient(ctrl) instanceCache := newInstances(client) routeController, err := newRoutes(client, instanceCache) - assert.NoError(t, err) + require.NoError(t, err) client.EXPECT().ListInstances(gomock.Any(), gomock.Any()).Times(1).Return([]linodego.Instance{}, nil) client.EXPECT().ListVPCIPAddresses(gomock.Any(), gomock.Any(), gomock.Any()).Times(2).Return([]linodego.VPCIP{}, nil) routes, err := routeController.ListRoutes(ctx, "test") - assert.NoError(t, err) + require.NoError(t, err) assert.Empty(t, routes) }) @@ -53,18 +64,18 @@ func TestListRoutes(t *testing.T) { } t.Run("should return no routes if instance exists but is not connected to VPC", func(t *testing.T) { - ctx := context.Background() + ctx := t.Context() ctrl := gomock.NewController(t) defer ctrl.Finish() client := mocks.NewMockClient(ctrl) instanceCache := newInstances(client) routeController, err := newRoutes(client, instanceCache) - assert.NoError(t, err) + require.NoError(t, err) client.EXPECT().ListInstances(gomock.Any(), nil).Times(1).Return([]linodego.Instance{validInstance}, nil) client.EXPECT().ListVPCIPAddresses(gomock.Any(), gomock.Any(), gomock.Any()).Times(2).Return([]linodego.VPCIP{}, nil) routes, err := routeController.ListRoutes(ctx, "test") - assert.NoError(t, err) + require.NoError(t, err) assert.Empty(t, routes) }) @@ -80,18 +91,18 @@ func TestListRoutes(t *testing.T) { } t.Run("should return no routes if instance exists, connected to VPC but no ip_ranges configured", func(t *testing.T) { - ctx := context.Background() + ctx := t.Context() ctrl := 
gomock.NewController(t) defer ctrl.Finish() client := mocks.NewMockClient(ctrl) instanceCache := newInstances(client) routeController, err := newRoutes(client, instanceCache) - assert.NoError(t, err) + require.NoError(t, err) client.EXPECT().ListInstances(gomock.Any(), nil).Times(1).Return([]linodego.Instance{validInstance}, nil) client.EXPECT().ListVPCIPAddresses(gomock.Any(), gomock.Any(), gomock.Any()).Times(4).Return(noRoutesInVPC, nil) routes, err := routeController.ListRoutes(ctx, "test") - assert.NoError(t, err) + require.NoError(t, err) assert.Empty(t, routes) }) @@ -122,18 +133,24 @@ func TestListRoutes(t *testing.T) { } t.Run("should return routes if instance exists, connected to VPC and ip_ranges configured", func(t *testing.T) { - ctx := context.Background() + ctx := t.Context() ctrl := gomock.NewController(t) defer ctrl.Finish() client := mocks.NewMockClient(ctrl) instanceCache := newInstances(client) + existingK8sCache := registeredK8sNodeCache + defer func() { + registeredK8sNodeCache = existingK8sCache + }() + registeredK8sNodeCache = newK8sNodeCache() + registeredK8sNodeCache.addNodeToCache(node) routeController, err := newRoutes(client, instanceCache) - assert.NoError(t, err) + require.NoError(t, err) client.EXPECT().ListInstances(gomock.Any(), nil).Times(1).Return([]linodego.Instance{validInstance}, nil) client.EXPECT().ListVPCIPAddresses(gomock.Any(), gomock.Any(), gomock.Any()).Times(4).Return(routesInVPC, nil) routes, err := routeController.ListRoutes(ctx, "test") - assert.NoError(t, err) + require.NoError(t, err) assert.NotEmpty(t, routes) assert.Equal(t, addressRange1, routes[0].DestinationCIDR) assert.Equal(t, addressRange2, routes[1].DestinationCIDR) @@ -164,37 +181,61 @@ func TestListRoutes(t *testing.T) { } t.Run("should return no routes if instance exists, connected to VPC and ip_ranges configured but vpc id doesn't match", func(t *testing.T) { - ctx := context.Background() + ctx := t.Context() ctrl := gomock.NewController(t) defer ctrl.Finish() client := mocks.NewMockClient(ctrl) instanceCache := newInstances(client) + existingK8sCache := registeredK8sNodeCache + defer func() { + registeredK8sNodeCache = existingK8sCache + }() + registeredK8sNodeCache = newK8sNodeCache() + registeredK8sNodeCache.addNodeToCache(node) routeController, err := newRoutes(client, instanceCache) - assert.NoError(t, err) + require.NoError(t, err) client.EXPECT().ListInstances(gomock.Any(), nil).Times(1).Return([]linodego.Instance{validInstance}, nil) client.EXPECT().ListVPCIPAddresses(gomock.Any(), gomock.Any(), gomock.Any()).Times(4).Return(routesInDifferentVPC, nil) routes, err := routeController.ListRoutes(ctx, "test") - assert.NoError(t, err) + require.NoError(t, err) assert.Empty(t, routes) }) t.Run("should return routes if multiple instances exists, connected to VPCs and ip_ranges configured", func(t *testing.T) { - ctx := context.Background() + ctx := t.Context() ctrl := gomock.NewController(t) defer ctrl.Finish() client := mocks.NewMockClient(ctrl) instanceCache := newInstances(client) + existingK8sCache := registeredK8sNodeCache + defer func() { + registeredK8sNodeCache = existingK8sCache + }() + registeredK8sNodeCache = newK8sNodeCache() + registeredK8sNodeCache.addNodeToCache(node) routeController, err := newRoutes(client, instanceCache) - assert.NoError(t, err) + require.NoError(t, err) vpcIP2 := "10.0.0.3" addressRange3 := "10.192.40.0/24" addressRange4 := "10.192.50.0/24" + addressRange5 := "10.192.60.0/24" + instance2Label := "mock-instance2" + instance3Label := 
"mock-instance3" + instance2ID := 124 + instance3ID := 125 validInstance2 := linodego.Instance{ - ID: 124, - Label: "mock-instance2", + ID: instance2ID, + Label: instance2Label, + Type: linodeType, + Region: region, + IPv4: []*net.IP{&publicIPv4, &privateIPv4}, + } + validInstance3 := linodego.Instance{ + ID: instance3ID, + Label: instance3Label, Type: linodeType, Region: region, IPv4: []*net.IP{&publicIPv4, &privateIPv4}, @@ -206,31 +247,56 @@ func TestListRoutes(t *testing.T) { AddressRange: nil, VPCID: vpcIDs["abc"], NAT1To1: nil, - LinodeID: 124, + LinodeID: instance2ID, }, { Address: nil, AddressRange: &addressRange3, VPCID: vpcIDs["abc"], NAT1To1: nil, - LinodeID: 124, + LinodeID: instance2ID, }, { Address: nil, AddressRange: &addressRange4, VPCID: vpcIDs["abc"], NAT1To1: nil, - LinodeID: 124, + LinodeID: instance2ID, }, + { + Address: &vpcIP2, + AddressRange: nil, + VPCID: vpcIDs["abc"], + NAT1To1: nil, + LinodeID: instance3ID, + }, + { + Address: nil, + AddressRange: &addressRange5, + VPCID: vpcIDs["abc"], + NAT1To1: nil, + LinodeID: instance3ID, + }, + } + + node2 := &v1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: instance2Label, + Labels: map[string]string{}, + Annotations: map[string]string{}, + }, + Spec: v1.NodeSpec{ProviderID: providerIDPrefix + strconv.Itoa(instance2ID)}, } - client.EXPECT().ListInstances(gomock.Any(), nil).Times(1).Return([]linodego.Instance{validInstance, validInstance2}, nil) + registeredK8sNodeCache.addNodeToCache(node2) + + client.EXPECT().ListInstances(gomock.Any(), nil).Times(1).Return([]linodego.Instance{validInstance, validInstance2, validInstance3}, nil) c1 := client.EXPECT().ListVPCIPAddresses(gomock.Any(), gomock.Any(), gomock.Any()).Times(1).Return(routesInVPC, nil) c2 := client.EXPECT().ListVPCIPAddresses(gomock.Any(), gomock.Any(), gomock.Any()).After(c1).Times(1).Return(routesInVPC2, nil) c3 := client.EXPECT().ListVPCIPAddresses(gomock.Any(), gomock.Any(), gomock.Any()).After(c2).Times(1).Return(routesInVPC, nil) client.EXPECT().ListVPCIPAddresses(gomock.Any(), gomock.Any(), gomock.Any()).After(c3).Times(1).Return(routesInVPC2, nil) routes, err := routeController.ListRoutes(ctx, "test") - assert.NoError(t, err) + require.NoError(t, err) assert.NotEmpty(t, routes) cidrs := make([]string, len(routes)) for i, value := range routes { @@ -240,11 +306,12 @@ func TestListRoutes(t *testing.T) { assert.Contains(t, cidrs, addressRange2) assert.Contains(t, cidrs, addressRange3) assert.Contains(t, cidrs, addressRange4) + assert.NotContains(t, cidrs, addressRange5) }) } func TestCreateRoute(t *testing.T) { - ctx := context.Background() + ctx := t.Context() Options.VPCNames = "dummy" vpcIDs["dummy"] = 1 Options.EnableRouteController = true @@ -255,6 +322,14 @@ func TestCreateRoute(t *testing.T) { privateIPv4 := net.ParseIP("192.168.133.65") linodeType := "g6-standard-1" region := "us-east" + node := &v1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Labels: map[string]string{}, + Annotations: map[string]string{}, + }, + Spec: v1.NodeSpec{ProviderID: providerIDPrefix + strconv.Itoa(nodeID)}, + } validInstance := linodego.Instance{ ID: nodeID, Label: name, @@ -290,8 +365,14 @@ func TestCreateRoute(t *testing.T) { defer ctrl.Finish() client := mocks.NewMockClient(ctrl) instanceCache := newInstances(client) + existingK8sCache := registeredK8sNodeCache + defer func() { + registeredK8sNodeCache = existingK8sCache + }() + registeredK8sNodeCache = newK8sNodeCache() + registeredK8sNodeCache.addNodeToCache(node) routeController, err := newRoutes(client, 
instanceCache) - assert.NoError(t, err) + require.NoError(t, err) client.EXPECT().ListInstances(gomock.Any(), nil).Times(1).Return([]linodego.Instance{validInstance}, nil) client.EXPECT().ListVPCIPAddresses(gomock.Any(), gomock.Any(), gomock.Any()).Times(2).Return(noRoutesInVPC, nil) @@ -324,7 +405,7 @@ func TestCreateRoute(t *testing.T) { client := mocks.NewMockClient(ctrl) instanceCache := newInstances(client) routeController, err := newRoutes(client, instanceCache) - assert.NoError(t, err) + require.NoError(t, err) client.EXPECT().ListInstances(gomock.Any(), nil).Times(1).Return([]linodego.Instance{validInstance}, nil) client.EXPECT().ListVPCIPAddresses(gomock.Any(), gomock.Any(), gomock.Any()).Times(2).Return(routesInVPC, nil) @@ -338,7 +419,7 @@ func TestCreateRoute(t *testing.T) { client := mocks.NewMockClient(ctrl) instanceCache := newInstances(client) routeController, err := newRoutes(client, instanceCache) - assert.NoError(t, err) + require.NoError(t, err) client.EXPECT().ListInstances(gomock.Any(), nil).Times(1).Return([]linodego.Instance{}, nil) client.EXPECT().ListVPCIPAddresses(gomock.Any(), gomock.Any(), gomock.Any()).Times(1).Return([]linodego.VPCIP{}, nil) @@ -352,7 +433,7 @@ func TestDeleteRoute(t *testing.T) { vpcIDs["dummy"] = 1 Options.EnableRouteController = true - ctx := context.Background() + ctx := t.Context() nodeID := 123 name := "mock-instance" @@ -381,7 +462,7 @@ func TestDeleteRoute(t *testing.T) { client := mocks.NewMockClient(ctrl) instanceCache := newInstances(client) routeController, err := newRoutes(client, instanceCache) - assert.NoError(t, err) + require.NoError(t, err) client.EXPECT().ListInstances(gomock.Any(), nil).Times(1).Return([]linodego.Instance{}, nil) client.EXPECT().ListVPCIPAddresses(gomock.Any(), gomock.Any(), gomock.Any()).Times(1).Return([]linodego.VPCIP{}, nil) @@ -412,7 +493,7 @@ func TestDeleteRoute(t *testing.T) { client := mocks.NewMockClient(ctrl) instanceCache := newInstances(client) routeController, err := newRoutes(client, instanceCache) - assert.NoError(t, err) + require.NoError(t, err) client.EXPECT().ListInstances(gomock.Any(), nil).Times(1).Return([]linodego.Instance{validInstance}, nil) client.EXPECT().ListVPCIPAddresses(gomock.Any(), gomock.Any(), gomock.Any()).Times(2).Return(noRoutesInVPC, nil) @@ -444,7 +525,7 @@ func TestDeleteRoute(t *testing.T) { client := mocks.NewMockClient(ctrl) instanceCache := newInstances(client) routeController, err := newRoutes(client, instanceCache) - assert.NoError(t, err) + require.NoError(t, err) client.EXPECT().ListInstances(gomock.Any(), nil).Times(1).Return([]linodego.Instance{validInstance}, nil) client.EXPECT().ListVPCIPAddresses(gomock.Any(), gomock.Any(), gomock.Any()).Times(2).Return(routesInVPC, nil) diff --git a/cloud/linode/service_controller.go b/cloud/linode/service_controller.go index 684cac7e..0d178eea 100644 --- a/cloud/linode/service_controller.go +++ b/cloud/linode/service_controller.go @@ -40,7 +40,7 @@ func (s *serviceController) Run(stopCh <-chan struct{}) { return } - if service.Spec.Type != "LoadBalancer" { + if service.Spec.Type != v1.ServiceTypeLoadBalancer { return } @@ -57,7 +57,7 @@ func (s *serviceController) Run(stopCh <-chan struct{}) { return } - if newSvc.Spec.Type != "LoadBalancer" && oldSvc.Spec.Type == "LoadBalancer" { + if newSvc.Spec.Type != v1.ServiceTypeLoadBalancer && oldSvc.Spec.Type == v1.ServiceTypeLoadBalancer { klog.Infof("ServiceController will handle service (%s) LoadBalancer deletion", getServiceNn(oldSvc)) s.queue.Add(oldSvc) } @@ -91,6 
+91,7 @@ func (s *serviceController) processNextDeletion() bool { } err := s.handleServiceDeleted(service) + //nolint: errorlint //switching to errors.Is()/errors.As() causes errors with Code field switch deleteErr := err.(type) { case nil: break @@ -104,6 +105,7 @@ func (s *serviceController) processNextDeletion() bool { default: klog.Errorf("failed to delete NodeBalancer for service (%s); will not retry: %s", getServiceNn(service), err) } + return true } diff --git a/cloud/linode/service_controller_test.go b/cloud/linode/service_controller_test.go index 8d90d9ea..bcb8c340 100644 --- a/cloud/linode/service_controller_test.go +++ b/cloud/linode/service_controller_test.go @@ -1,17 +1,18 @@ package linode import ( - "context" "testing" "time" "github.com/golang/mock/gomock" - "github.com/linode/linode-cloud-controller-manager/cloud/linode/client/mocks" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/informers" "k8s.io/client-go/kubernetes/fake" "k8s.io/client-go/util/workqueue" + + "github.com/linode/linode-cloud-controller-manager/cloud/linode/client/mocks" ) func Test_serviceController_Run(t *testing.T) { @@ -23,14 +24,17 @@ func Test_serviceController_Run(t *testing.T) { informer := informers.NewSharedInformerFactory(kubeClient, 0).Core().V1().Services() mockQueue := workqueue.NewTypedDelayingQueueWithConfig(workqueue.TypedDelayingQueueConfig[any]{Name: "test"}) - loadbalancers := newLoadbalancers(client, "us-east").(*loadbalancers) + loadbalancers, assertion := newLoadbalancers(client, "us-east").(*loadbalancers) + if !assertion { + t.Error("type assertion failed") + } svcCtrl := newServiceController(loadbalancers, informer) svcCtrl.queue = mockQueue svc := createTestService() svc.Spec.Type = "LoadBalancer" - _, err := kubeClient.CoreV1().Services("test-ns").Create(context.TODO(), svc, metav1.CreateOptions{}) - assert.NoError(t, err, "expected no error during svc creation") + _, err := kubeClient.CoreV1().Services("test-ns").Create(t.Context(), svc, metav1.CreateOptions{}) + require.NoError(t, err, "expected no error during svc creation") // Start the controller stopCh := make(chan struct{}) @@ -38,7 +42,7 @@ func Test_serviceController_Run(t *testing.T) { // Add svc to the informer err = svcCtrl.informer.Informer().GetStore().Add(svc) - assert.NoError(t, err, "expected no error when adding svc to informer") + require.NoError(t, err, "expected no error when adding svc to informer") // Allow some time for the queue to process time.Sleep(1 * time.Second) diff --git a/cloud/linode/vpc.go b/cloud/linode/vpc.go index 01c1ed14..81c73a9e 100644 --- a/cloud/linode/vpc.go +++ b/cloud/linode/vpc.go @@ -2,30 +2,47 @@ package linode import ( "context" + "encoding/json" "fmt" "net/http" + "strconv" "strings" "sync" - "github.com/linode/linode-cloud-controller-manager/cloud/linode/client" "github.com/linode/linodego" "k8s.io/klog/v2" + + "github.com/linode/linode-cloud-controller-manager/cloud/linode/client" ) var ( Mu sync.RWMutex // vpcIDs map stores vpc id's for given vpc labels vpcIDs = make(map[string]int, 0) + // subnetIDs map stores subnet id's for given subnet labels + subnetIDs = make(map[string]int, 0) ) type vpcLookupError struct { value string } +type subnetLookupError struct { + value string +} + +type subnetFilter struct { + SubnetID string `json:"subnet_id"` +} + func (e vpcLookupError) Error() string { return fmt.Sprintf("failed to find VPC: %q", e.value) } +func (e subnetLookupError) Error() 
string { + return fmt.Sprintf("failed to find subnet: %q", e.value) +} + // GetAllVPCIDs returns vpc ids stored in map func GetAllVPCIDs() []int { Mu.Lock() @@ -59,13 +76,70 @@ func GetVPCID(ctx context.Context, client client.Client, vpcName string) (int, e return 0, vpcLookupError{vpcName} } +// GetSubnetID returns the subnet ID of given subnet label +func GetSubnetID(ctx context.Context, client client.Client, vpcID int, subnetName string) (int, error) { + Mu.Lock() + defer Mu.Unlock() + + // Check if map contains the id for the given label + if subnetid, ok := subnetIDs[subnetName]; ok { + return subnetid, nil + } + // Otherwise, get it from linodego.ListVPCSubnets() + subnets, err := client.ListVPCSubnets(ctx, vpcID, &linodego.ListOptions{}) + if err != nil { + return 0, err + } + for _, subnet := range subnets { + if subnet.Label == subnetName { + subnetIDs[subnetName] = subnet.ID + return subnet.ID, nil + } + } + + return 0, subnetLookupError{subnetName} +} + // GetVPCIPAddresses returns vpc ip's for given VPC label func GetVPCIPAddresses(ctx context.Context, client client.Client, vpcName string) ([]linodego.VPCIP, error) { vpcID, err := GetVPCID(ctx, client, strings.TrimSpace(vpcName)) if err != nil { return nil, err } - resp, err := client.ListVPCIPAddresses(ctx, vpcID, linodego.NewListOptions(0, "")) + + resultFilter := "" + + // Get subnet ID(s) from name(s) if subnet-names is specified + if Options.SubnetNames != "" { + // Get the IDs and store them + // subnetIDList is a slice of strings for ease of use with resultFilter + subnetNames := strings.Split(Options.SubnetNames, ",") + subnetIDList := []string{} + + for _, name := range subnetNames { + // For caching + var subnetID int + subnetID, err = GetSubnetID(ctx, client, vpcID, name) + // Don't filter subnets we can't find + if err != nil { + klog.Errorf("subnet %s not found due to error: %v. 
Skipping.", name, err) + continue + } + + // For use with the JSON filter + subnetIDList = append(subnetIDList, strconv.Itoa(subnetID)) + } + + // Assign the list of IDs to a stringified JSON filter + var filter []byte + filter, err = json.Marshal(subnetFilter{SubnetID: strings.Join(subnetIDList, ",")}) + if err != nil { + klog.Error("could not create JSON filter for subnet_id") + } + resultFilter = string(filter) + } + + resp, err := client.ListVPCIPAddresses(ctx, vpcID, linodego.NewListOptions(0, resultFilter)) if err != nil { if linodego.ErrHasStatus(err, http.StatusNotFound) { Mu.Lock() diff --git a/cloud/linode/vpc_test.go b/cloud/linode/vpc_test.go index 9e99b675..ce0625ff 100644 --- a/cloud/linode/vpc_test.go +++ b/cloud/linode/vpc_test.go @@ -1,7 +1,6 @@ package linode import ( - "context" "errors" "net/http" "reflect" @@ -9,9 +8,11 @@ import ( "testing" "github.com/golang/mock/gomock" - "github.com/linode/linode-cloud-controller-manager/cloud/linode/client/mocks" "github.com/linode/linodego" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/linode/linode-cloud-controller-manager/cloud/linode/client/mocks" ) func TestGetAllVPCIDs(t *testing.T) { @@ -49,7 +50,7 @@ func TestGetVPCID(t *testing.T) { defer ctrl.Finish() client := mocks.NewMockClient(ctrl) vpcIDs = map[string]int{"test1": 1, "test2": 2, "test3": 3} - got, err := GetVPCID(context.TODO(), client, "test3") + got, err := GetVPCID(t.Context(), client, "test3") if err != nil { t.Errorf("GetVPCID() error = %v", err) return @@ -65,8 +66,8 @@ func TestGetVPCID(t *testing.T) { client := mocks.NewMockClient(ctrl) vpcIDs = map[string]int{"test1": 1, "test2": 2, "test3": 3} client.EXPECT().ListVPCs(gomock.Any(), gomock.Any()).Times(1).Return([]linodego.VPC{}, errors.New("error")) - got, err := GetVPCID(context.TODO(), client, "test4") - assert.Error(t, err) + got, err := GetVPCID(t.Context(), client, "test4") + require.Error(t, err) if got != 0 { t.Errorf("GetVPCID() = %v, want %v", got, 0) } @@ -78,8 +79,8 @@ func TestGetVPCID(t *testing.T) { client := mocks.NewMockClient(ctrl) vpcIDs = map[string]int{"test1": 1, "test2": 2, "test3": 3} client.EXPECT().ListVPCs(gomock.Any(), gomock.Any()).Times(1).Return([]linodego.VPC{}, nil) - got, err := GetVPCID(context.TODO(), client, "test4") - assert.ErrorIs(t, err, vpcLookupError{"test4"}) + got, err := GetVPCID(t.Context(), client, "test4") + require.ErrorIs(t, err, vpcLookupError{"test4"}) if got != 0 { t.Errorf("GetVPCID() = %v, want %v", got, 0) } @@ -91,8 +92,8 @@ func TestGetVPCID(t *testing.T) { client := mocks.NewMockClient(ctrl) vpcIDs = map[string]int{"test1": 1, "test2": 2, "test3": 3} client.EXPECT().ListVPCs(gomock.Any(), gomock.Any()).Times(1).Return([]linodego.VPC{{ID: 4, Label: "test4"}}, nil) - got, err := GetVPCID(context.TODO(), client, "test4") - assert.NoError(t, err) + got, err := GetVPCID(t.Context(), client, "test4") + require.NoError(t, err) if got != 4 { t.Errorf("GetVPCID() = %v, want %v", got, 4) } @@ -106,8 +107,8 @@ func TestGetVPCIPAddresses(t *testing.T) { client := mocks.NewMockClient(ctrl) vpcIDs = map[string]int{"test1": 1, "test2": 2, "test3": 3} client.EXPECT().ListVPCs(gomock.Any(), gomock.Any()).Times(1).Return([]linodego.VPC{}, nil) - _, err := GetVPCIPAddresses(context.TODO(), client, "test4") - assert.Error(t, err) + _, err := GetVPCIPAddresses(t.Context(), client, "test4") + require.Error(t, err) }) t.Run("vpc id found but listing ip addresses fails with 404 error", func(t *testing.T) { @@ -116,8 +117,8 @@ 
func TestGetVPCIPAddresses(t *testing.T) { client := mocks.NewMockClient(ctrl) vpcIDs = map[string]int{"test1": 1, "test2": 2, "test3": 3} client.EXPECT().ListVPCIPAddresses(gomock.Any(), gomock.Any(), gomock.Any()).Times(1).Return([]linodego.VPCIP{}, &linodego.Error{Code: http.StatusNotFound, Message: "[404] [label] VPC not found"}) - _, err := GetVPCIPAddresses(context.TODO(), client, "test3") - assert.Error(t, err) + _, err := GetVPCIPAddresses(t.Context(), client, "test3") + require.Error(t, err) _, exists := vpcIDs["test3"] assert.False(t, exists, "test3 key should get deleted from vpcIDs map") }) @@ -128,8 +129,8 @@ func TestGetVPCIPAddresses(t *testing.T) { client := mocks.NewMockClient(ctrl) vpcIDs = map[string]int{"test1": 1, "test2": 2, "test3": 3} client.EXPECT().ListVPCIPAddresses(gomock.Any(), gomock.Any(), gomock.Any()).Times(1).Return([]linodego.VPCIP{}, &linodego.Error{Code: http.StatusInternalServerError, Message: "[500] [label] Internal Server Error"}) - _, err := GetVPCIPAddresses(context.TODO(), client, "test1") - assert.Error(t, err) + _, err := GetVPCIPAddresses(t.Context(), client, "test1") + require.Error(t, err) _, exists := vpcIDs["test1"] assert.True(t, exists, "test1 key should not get deleted from vpcIDs map") }) @@ -141,9 +142,85 @@ func TestGetVPCIPAddresses(t *testing.T) { vpcIDs = map[string]int{"test1": 1, "test2": 2, "test3": 3} client.EXPECT().ListVPCs(gomock.Any(), gomock.Any()).Times(1).Return([]linodego.VPC{{ID: 10, Label: "test10"}}, nil) client.EXPECT().ListVPCIPAddresses(gomock.Any(), gomock.Any(), gomock.Any()).Times(1).Return([]linodego.VPCIP{}, nil) - _, err := GetVPCIPAddresses(context.TODO(), client, "test10") - assert.NoError(t, err) + _, err := GetVPCIPAddresses(t.Context(), client, "test10") + require.NoError(t, err) _, exists := vpcIDs["test10"] assert.True(t, exists, "test10 key should be present in vpcIDs map") }) + + t.Run("vpc id found and ip addresses found with subnet filtering", func(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + client := mocks.NewMockClient(ctrl) + sn := Options.SubnetNames + defer func() { Options.SubnetNames = sn }() + Options.SubnetNames = "subnet4" + vpcIDs = map[string]int{"test1": 1} + subnetIDs = map[string]int{"subnet1": 1} + client.EXPECT().ListVPCs(gomock.Any(), gomock.Any()).Times(1).Return([]linodego.VPC{{ID: 10, Label: "test10"}}, nil) + client.EXPECT().ListVPCSubnets(gomock.Any(), gomock.Any(), gomock.Any()).Times(1).Return([]linodego.VPCSubnet{{ID: 4, Label: "subnet4"}}, nil) + client.EXPECT().ListVPCIPAddresses(gomock.Any(), gomock.Any(), gomock.Any()).Times(1).Return([]linodego.VPCIP{}, nil) + _, err := GetVPCIPAddresses(t.Context(), client, "test10") + require.NoError(t, err) + _, exists := subnetIDs["subnet4"] + assert.True(t, exists, "subnet4 should be present in subnetIDs map") + }) +} + +func TestGetSubnetID(t *testing.T) { + t.Run("subnet in cache", func(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + client := mocks.NewMockClient(ctrl) + subnetIDs = map[string]int{"test1": 1, "test2": 2, "test3": 3} + got, err := GetSubnetID(t.Context(), client, 0, "test3") + if err != nil { + t.Errorf("GetSubnetID() error = %v", err) + return + } + if got != subnetIDs["test3"] { + t.Errorf("GetSubnetID() = %v, want %v", got, subnetIDs["test3"]) + } + }) + + t.Run("subnetID not in cache and listVPCSubnets return error", func(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + client := mocks.NewMockClient(ctrl) + subnetIDs = 
map[string]int{"test1": 1, "test2": 2, "test3": 3} + client.EXPECT().ListVPCSubnets(gomock.Any(), gomock.Any(), gomock.Any()).Times(1).Return([]linodego.VPCSubnet{}, errors.New("error")) + got, err := GetSubnetID(t.Context(), client, 0, "test4") + require.Error(t, err) + if got != 0 { + t.Errorf("GetSubnetID() = %v, want %v", got, 0) + } + _, exists := subnetIDs["test4"] + assert.False(t, exists, "subnet4 should not be present in subnetIDs") + }) + + t.Run("subnetID not in cache and listVPCSubnets return nothing", func(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + client := mocks.NewMockClient(ctrl) + subnetIDs = map[string]int{"test1": 1, "test2": 2, "test3": 3} + client.EXPECT().ListVPCSubnets(gomock.Any(), gomock.Any(), gomock.Any()).Times(1).Return([]linodego.VPCSubnet{}, nil) + got, err := GetSubnetID(t.Context(), client, 0, "test4") + require.ErrorIs(t, err, subnetLookupError{"test4"}) + if got != 0 { + t.Errorf("GetSubnetID() = %v, want %v", got, 0) + } + }) + + t.Run("subnetID not in cache and listVPCSubnets return subnet info", func(t *testing.T) { + ctrl := gomock.NewController(t) + defer ctrl.Finish() + client := mocks.NewMockClient(ctrl) + subnetIDs = map[string]int{"test1": 1, "test2": 2, "test3": 3} + client.EXPECT().ListVPCSubnets(gomock.Any(), gomock.Any(), gomock.Any()).Times(1).Return([]linodego.VPCSubnet{{ID: 4, Label: "test4"}}, nil) + got, err := GetSubnetID(t.Context(), client, 0, "test4") + require.NoError(t, err) + if got != 4 { + t.Errorf("GetSubnetID() = %v, want %v", got, 4) + } + }) } diff --git a/deploy/ccm-linode-template.yaml b/deploy/ccm-linode-template.yaml index 4f0048b1..829da922 100644 --- a/deploy/ccm-linode-template.yaml +++ b/deploy/ccm-linode-template.yaml @@ -77,10 +77,17 @@ spec: labels: app: ccm-linode spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: node-role.kubernetes.io/control-plane + operator: Exists + securityContext: + seccompProfile: + type: RuntimeDefault serviceAccountName: ccm-linode - nodeSelector: - # The CCM will only run on a Node labelled as a master, you may want to change this - node-role.kubernetes.io/control-plane: "" tolerations: # The CCM can run on Nodes tainted as masters - key: "node-role.kubernetes.io/control-plane" @@ -122,6 +129,11 @@ spec: secretKeyRef: name: ccm-linode key: region + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL volumes: - name: k8s hostPath: diff --git a/deploy/chart/templates/ccm-linode.yaml b/deploy/chart/templates/ccm-linode.yaml index c9d83c94..6aed9c85 100644 --- a/deploy/chart/templates/ccm-linode.yaml +++ b/deploy/chart/templates/ccm-linode.yaml @@ -9,4 +9,3 @@ stringData: region: {{ required ".Values.region required" .Values.region }} type: Opaque {{- end }} - diff --git a/deploy/chart/templates/clusterrole-rbac.yaml b/deploy/chart/templates/clusterrole-rbac.yaml index 42dbbeb8..65d0b4d5 100644 --- a/deploy/chart/templates/clusterrole-rbac.yaml +++ b/deploy/chart/templates/clusterrole-rbac.yaml @@ -1,3 +1,4 @@ +{{- if ne .Values.rbacEnabled false }} apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: @@ -38,3 +39,4 @@ rules: resources: ["ciliumbgppeeringpolicies"] verbs: ["get", "watch", "list", "create"] {{- end }} +{{- end }} diff --git a/deploy/chart/templates/clusterrolebinding-rbac.yaml b/deploy/chart/templates/clusterrolebinding-rbac.yaml index e4dada27..06a4373b 100644 --- 
a/deploy/chart/templates/clusterrolebinding-rbac.yaml +++ b/deploy/chart/templates/clusterrolebinding-rbac.yaml @@ -1,3 +1,4 @@ +{{- if ne .Values.rbacEnabled false }} kind: ClusterRoleBinding apiVersion: rbac.authorization.k8s.io/v1 metadata: @@ -10,3 +11,4 @@ subjects: - kind: ServiceAccount name: ccm-linode namespace: {{ required ".Values.namespace required" .Values.namespace }} +{{- end }} diff --git a/deploy/chart/templates/daemonset.yaml b/deploy/chart/templates/daemonset.yaml index f2ca2985..b9b2effa 100644 --- a/deploy/chart/templates/daemonset.yaml +++ b/deploy/chart/templates/daemonset.yaml @@ -9,71 +9,128 @@ spec: selector: matchLabels: app: ccm-linode + {{- with .Values.updateStrategy }} + updateStrategy: + {{- toYaml . | nindent 4 }} + {{- end }} template: metadata: labels: app: ccm-linode spec: + {{- if ne .Values.rbacEnabled false }} serviceAccountName: ccm-linode - {{- if .Values.nodeSelector }} + {{- end }} + {{- with .Values.securityContext }} + securityContext: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.nodeSelector }} nodeSelector: -{{ toYaml .Values.nodeSelector | indent 8 }} + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: + {{- toYaml . | nindent 8 }} {{- end }} - {{- if .Values.tolerations }} + {{- with .Values.tolerations }} tolerations: -{{ toYaml .Values.tolerations | indent 8 }} + {{- toYaml . | nindent 8 }} {{- end }} - hostNetwork: true + hostNetwork: {{ if hasKey .Values "enableHostNetwork" }}{{ .Values.enableHostNetwork }}{{ else }}true{{ end }} containers: - image: {{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }} imagePullPolicy: {{ .Values.image.pullPolicy }} name: ccm-linode args: - --leader-elect-resource-lock=leases - - --v=3 + - --v={{ default 3 .Values.logVerbosity }} - --secure-port=10253 - --webhook-secure-port=0 - {{- if .Values.linodegoDebug }} - - --linodego-debug={{ .Values.linodegoDebug }} + {{- with .Values.linodegoDebug }} + - --linodego-debug={{ . }} {{- end }} - {{- if .Values.routeController }} - - --enable-route-controller=true - {{- if and .Values.routeController.vpcName .Values.routeController.vpcNames }} - {{- fail "Both vpcName and vpcNames are set. Please use only vpcNames." }} + {{- $vpcNames := .Values.vpcNames }} + {{- if and .Values.routeController .Values.routeController.vpcNames }} + {{- $vpcNames = .Values.routeController.vpcNames }} {{- end }} - {{- if not (or .Values.routeController.vpcName .Values.routeController.vpcNames) }} - {{- fail "Neither vpcName nor vpcNames is set. Please set one of them." }} + {{- $vpcName := .Values.vpcName }} + {{- if and .Values.routeController .Values.routeController.vpcName }} + {{- $vpcName = .Values.routeController.vpcName }} {{- end }} - {{- if .Values.routeController.vpcName }} - - --vpc-name={{ .Values.routeController.vpcName }} + {{- if and $vpcName $vpcNames }} + {{- fail "Both vpcName and vpcNames are set. Please use only vpcNames." }} {{- end }} - {{- if .Values.routeController.vpcNames }} - - --vpc-names={{ .Values.routeController.vpcNames }} + {{- $subnetNames := .Values.subnetNames }} + {{- if and .Values.routeController .Values.routeController.subnetNames }} + {{- $subnetNames = .Values.routeController.subnetNames }} + {{- end }} + {{- if .Values.routeController }} + - --enable-route-controller=true + {{- if not (or $vpcName $vpcNames) }} + {{- fail "Neither vpcName nor vpcNames is set. Please set one of them." 
}} {{- end }} - --configure-cloud-routes={{ default true .Values.routeController.configureCloudRoutes }} - --cluster-cidr={{ required "A valid .Values.routeController.clusterCIDR is required" .Values.routeController.clusterCIDR }} - {{- if .Values.routeController.routeReconciliationPeriod }} - - --route-reconciliation-period={{ .Values.routeController.routeReconciliationPeriod }} + {{- with .Values.routeController.routeReconciliationPeriod }} + - --route-reconciliation-period={{ . }} + {{- end }} {{- end }} + {{- with $vpcNames }} + - --vpc-names={{ . }} {{- end }} + {{- with $vpcName }} + - --vpc-name={{ . }} + {{- end }} + {{- with $subnetNames }} + - --subnet-names={{ . }} + {{ end }} {{- if .Values.sharedIPLoadBalancing }} - {{- if .Values.sharedIPLoadBalancing.bgpNodeSelector }} - - --bgp-node-selector={{ .Values.sharedIPLoadBalancing.bgpNodeSelector }} + {{- with .Values.sharedIPLoadBalancing.bgpNodeSelector }} + - --bgp-node-selector={{ . }} {{- end }} - {{- if .Values.sharedIPLoadBalancing.ipHolderSuffix }} - - --ip-holder-suffix={{ .Values.sharedIPLoadBalancing.ipHolderSuffix }} + {{- with .Values.sharedIPLoadBalancing.ipHolderSuffix }} + - --ip-holder-suffix={{ . }} {{- end}} - --load-balancer-type={{ required "A valid .Values.sharedIPLoadBalancing.loadBalancerType is required for shared IP load-balancing" .Values.sharedIPLoadBalancing.loadBalancerType }} {{- end }} - {{- if .Values.tokenHealthChecker }} - - --enable-token-health-checker={{ .Values.tokenHealthChecker }} + {{- with .Values.tokenHealthChecker }} + - --enable-token-health-checker={{ . }} {{- end }} - {{- if .Values.nodeBalancerTags }} - - --nodebalancer-tags={{ join " " .Values.nodeBalancerTags }} + {{- with .Values.nodeBalancerTags }} + - --nodebalancer-tags={{ join " " . }} {{- end }} {{- if .Values.allowUnauthorizedMetrics }} - --authorization-always-allow-paths="/metrics" {{- end }} + {{- if .Values.defaultNBType }} + - --default-nodebalancer-type={{ .Values.defaultNBType }} + {{- end }} + {{- if .Values.enableIPv6ForLoadBalancers }} + - --enable-ipv6-for-loadbalancers={{ .Values.enableIPv6ForLoadBalancers }} + {{- end }} + {{- if .Values.nodeBalancerBackendIPv4Subnet }} + - --nodebalancer-backend-ipv4-subnet={{ .Values.nodeBalancerBackendIPv4Subnet }} + {{- end }} + {{- if .Values.extraArgs }} + {{- toYaml .Values.extraArgs | nindent 12 }} + {{- end }} + {{- with .Values.containerSecurityContext }} + securityContext: + {{- toYaml . | nindent 12 }} + {{- end }} + {{- with .Values.readinessProbe }} + readinessProbe: + {{- toYaml . | nindent 12 }} + {{- end }} + {{- with .Values.livenessProbe }} + livenessProbe: + {{- toYaml . | nindent 12 }} + {{- end }} + {{- with .Values.resources }} + resources: + {{- toYaml . | nindent 12 }} + {{- end }} volumeMounts: - mountPath: /etc/kubernetes name: k8s @@ -95,9 +152,13 @@ spec: {{- toYaml . | nindent 12 }} {{- end}} volumes: + {{- with .Values.k8sVolume }} + {{- toYaml . | nindent 8 }} + {{- else }} - name: k8s hostPath: path: /etc/kubernetes + {{- end }} {{- with .Values.volumes}} {{- toYaml . 
| nindent 8 }} {{- end}} diff --git a/deploy/chart/templates/serviceaccount.yaml b/deploy/chart/templates/serviceaccount.yaml index 023df6f1..6098f3c5 100644 --- a/deploy/chart/templates/serviceaccount.yaml +++ b/deploy/chart/templates/serviceaccount.yaml @@ -1,5 +1,7 @@ +{{- if ne .Values.rbacEnabled false }} apiVersion: v1 kind: ServiceAccount metadata: name: ccm-linode namespace: {{ required ".Values.namespace required" .Values.namespace }} +{{- end }} diff --git a/deploy/chart/values.yaml b/deploy/chart/values.yaml index 40c51f65..7da9649c 100644 --- a/deploy/chart/values.yaml +++ b/deploy/chart/values.yaml @@ -10,10 +10,27 @@ region: "" # apiTokenRef: "apiToken" # regionRef: "region" -# node-role.kubernetes.io/master - if set true, it deploys the svc on the master node -nodeSelector: - # The CCM will only run on a Node labelled as a master, you may want to change this - node-role.kubernetes.io/control-plane: "" +# Ensures the CCM runs on control plane nodes +affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: node-role.kubernetes.io/control-plane + operator: Exists + +# DEPRECATED: use affinity instead +nodeSelector: {} + +securityContext: + seccompProfile: + type: RuntimeDefault + +containerSecurityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL # Image repository must be 'linode/linode-cloud-controller-manager'. The tag can be changed/set to various ccm versions. # The pullPolicy is set to Always but can be changed when it is not required to always pull the new image @@ -54,12 +71,29 @@ tolerations: # routeController: # vpcName: [Deprecated: use vpcNames instead] # vpcNames: +# subnetNames: # clusterCIDR: 10.0.0.0/8 # configureCloudRoutes: true +# vpcs and subnets that node internal IPs will be assigned from (not required if already specified in routeController) +# vpcName: [Deprecated: use vpcNames instead] +# vpcNames: +# subnetNames: + # Enable Linode token health checker # tokenHealthChecker: true +# Default NodeBalancer type to create("common" or "premium"). 
Default is "common" +# defaultNBType: "common" + +# Enable IPv6 ingress addresses for LoadBalancer services +# When enabled, both IPv4 and IPv6 addresses will be included in the LoadBalancer status for all services +# This can also be controlled per-service using the "service.beta.kubernetes.io/linode-loadbalancer-enable-ipv6-ingress" annotation +# enableIPv6ForLoadBalancers: true + +# nodeBalancerBackendIPv4Subnet is the subnet to use for the backend ips of the NodeBalancer +# nodeBalancerBackendIPv4Subnet: "" + # This section adds the ability to pass environment variables to adjust CCM defaults # https://github.com/linode/linode-cloud-controller-manager/blob/master/cloud/linode/loadbalancers.go # LINODE_HOSTNAME_ONLY_INGRESS type bool is supported @@ -81,5 +115,53 @@ volumeMounts: # - mountPath: /tmp/ # name: test-volume +resources: + # requests: + # memory: "100Mi" + # cpu: "50m" + # limits: + # memory: "1000Mi" + # cpu: "1000m" + # This flag allows to scrape /metrics endpoint without authorization -allowUnauthorizedMetrics: false +# allowUnauthorizedMetrics: false + +# enables host network +# enableHostNetwork: true + +# specify the update strategy +# updateStrategy: +# type: RollingUpdate +# rollingUpdate: +# maxUnavailable: 25% + +# log verbosity of the CCM +# logVerbosity: 3 + +# set the liveness and readiness probes +# livenessProbe: +# tcpSocket: +# port: 10253 +# initialDelaySeconds: 15 +# timeoutSeconds: 15 +# failureThreshold: 8 + +# readinessProbe: +# tcpSocket: +# port: 10253 +# initialDelaySeconds: 15 +# timeoutSeconds: 15 +# failureThreshold: 8 + +# set extra arguments to the CCM container. +# extraArgs: +# - --kubeconfig=/etc/kubernetes/value + +# add toggle to rbac, useful when running CCM outside a cluster +# rbacEnabled: true + +# template kubeconfig volume +# k8sVolume: +# - name: k8s +# hostPath: +# path: /etc/kubeconfig diff --git a/devbox.json b/devbox.json index ff3d96fa..c32d2d26 100644 --- a/devbox.json +++ b/devbox.json @@ -4,7 +4,7 @@ "clusterctl@latest", "docker@latest", "envsubst@latest", - "go@1.23.3", + "go@1.24.1", "golangci-lint@latest", "jq@latest", "kind@latest", @@ -22,6 +22,7 @@ "mgmt-and-capl-cluster": "make mgmt-and-capl-cluster", "e2e-test": "make e2e-test", "e2e-test-bgp": "make e2e-test-bgp", + "e2e-test-subnet": "make e2e-test-subnet", "cleanup-cluster": "make cleanup-cluster" } }, diff --git a/devbox.lock b/devbox.lock index cb767c68..f172e44c 100644 --- a/devbox.lock +++ b/devbox.lock @@ -2,296 +2,296 @@ "lockfile_version": "1", "packages": { "clusterctl@latest": { - "last_modified": "2024-11-03T14:18:04Z", - "resolved": "github:NixOS/nixpkgs/4ae2e647537bcdbb82265469442713d066675275#clusterctl", + "last_modified": "2025-02-23T09:42:26Z", + "resolved": "github:NixOS/nixpkgs/2d068ae5c6516b2d04562de50a58c682540de9bf#clusterctl", "source": "devbox-search", - "version": "1.8.4", + "version": "1.9.5", "systems": { "aarch64-darwin": { "outputs": [ { "name": "out", - "path": "/nix/store/5s555wvi2h0w7r99raairnfzrvmpmh6q-clusterctl-1.8.4", + "path": "/nix/store/rhz6ibkai0lk1hfjc9cplmplgg2birvj-clusterctl-1.9.5", "default": true } ], - "store_path": "/nix/store/5s555wvi2h0w7r99raairnfzrvmpmh6q-clusterctl-1.8.4" + "store_path": "/nix/store/rhz6ibkai0lk1hfjc9cplmplgg2birvj-clusterctl-1.9.5" }, "aarch64-linux": { "outputs": [ { "name": "out", - "path": "/nix/store/1bqxwb82x5lg07a4kwc22ws342d0dwxz-clusterctl-1.8.4", + "path": "/nix/store/fxbllswc2ajmfacrj3455a9lqwqp6rah-clusterctl-1.9.5", "default": true } ], - "store_path": 
"/nix/store/1bqxwb82x5lg07a4kwc22ws342d0dwxz-clusterctl-1.8.4" + "store_path": "/nix/store/fxbllswc2ajmfacrj3455a9lqwqp6rah-clusterctl-1.9.5" }, "x86_64-darwin": { "outputs": [ { "name": "out", - "path": "/nix/store/wj491hzqdi6bzrzm8mcznwbd1a4542km-clusterctl-1.8.4", + "path": "/nix/store/fgxhqa2ap491rc5hhrxh3y9av7ysv4nx-clusterctl-1.9.5", "default": true } ], - "store_path": "/nix/store/wj491hzqdi6bzrzm8mcznwbd1a4542km-clusterctl-1.8.4" + "store_path": "/nix/store/fgxhqa2ap491rc5hhrxh3y9av7ysv4nx-clusterctl-1.9.5" }, "x86_64-linux": { "outputs": [ { "name": "out", - "path": "/nix/store/2vyd0iw7d2x9rlan9jjpyvpzbr3c63ic-clusterctl-1.8.4", + "path": "/nix/store/s3fsmnxsiz1gv5gk81igbw46ijkidv43-clusterctl-1.9.5", "default": true } ], - "store_path": "/nix/store/2vyd0iw7d2x9rlan9jjpyvpzbr3c63ic-clusterctl-1.8.4" + "store_path": "/nix/store/s3fsmnxsiz1gv5gk81igbw46ijkidv43-clusterctl-1.9.5" } } }, "ctlptl@latest": { - "last_modified": "2024-11-03T14:18:04Z", - "resolved": "github:NixOS/nixpkgs/4ae2e647537bcdbb82265469442713d066675275#ctlptl", + "last_modified": "2025-02-27T15:48:43Z", + "resolved": "github:NixOS/nixpkgs/6c5c5f5100281f8f4ff23f13edd17d645178c87c#ctlptl", "source": "devbox-search", - "version": "0.8.35", + "version": "0.8.39", "systems": { "aarch64-darwin": { "outputs": [ { "name": "out", - "path": "/nix/store/1nc7z4qscr5fh7lppiiz7xb1hqrhjqpb-ctlptl-0.8.35", + "path": "/nix/store/s403dlw5syd2mvwk655zjc2dza0r79ad-ctlptl-0.8.39", "default": true } ], - "store_path": "/nix/store/1nc7z4qscr5fh7lppiiz7xb1hqrhjqpb-ctlptl-0.8.35" + "store_path": "/nix/store/s403dlw5syd2mvwk655zjc2dza0r79ad-ctlptl-0.8.39" }, "aarch64-linux": { "outputs": [ { "name": "out", - "path": "/nix/store/9sm3jwlkdxbgkv3vz6pip5r45v8cjnhf-ctlptl-0.8.35", + "path": "/nix/store/cjxia3v18wmhmrngy77dq7l7gbpxzh6w-ctlptl-0.8.39", "default": true } ], - "store_path": "/nix/store/9sm3jwlkdxbgkv3vz6pip5r45v8cjnhf-ctlptl-0.8.35" + "store_path": "/nix/store/cjxia3v18wmhmrngy77dq7l7gbpxzh6w-ctlptl-0.8.39" }, "x86_64-darwin": { "outputs": [ { "name": "out", - "path": "/nix/store/zn1s9i1s25dgb0yphp8avrfqqk7wxjbv-ctlptl-0.8.35", + "path": "/nix/store/kzz611ikpys2994zma71y8xi7kjszrbl-ctlptl-0.8.39", "default": true } ], - "store_path": "/nix/store/zn1s9i1s25dgb0yphp8avrfqqk7wxjbv-ctlptl-0.8.35" + "store_path": "/nix/store/kzz611ikpys2994zma71y8xi7kjszrbl-ctlptl-0.8.39" }, "x86_64-linux": { "outputs": [ { "name": "out", - "path": "/nix/store/nd5bc6s6jxczmf1vg2vm314621dwfcak-ctlptl-0.8.35", + "path": "/nix/store/c4q76lxk08v6csh46v841wi6jv4vycqw-ctlptl-0.8.39", "default": true } ], - "store_path": "/nix/store/nd5bc6s6jxczmf1vg2vm314621dwfcak-ctlptl-0.8.35" + "store_path": "/nix/store/c4q76lxk08v6csh46v841wi6jv4vycqw-ctlptl-0.8.39" } } }, "docker@latest": { - "last_modified": "2024-11-05T01:08:39Z", - "resolved": "github:NixOS/nixpkgs/a04d33c0c3f1a59a2c1cb0c6e34cd24500e5a1dc#docker", + "last_modified": "2025-03-11T17:52:14Z", + "resolved": "github:NixOS/nixpkgs/0d534853a55b5d02a4ababa1d71921ce8f0aee4c#docker", "source": "devbox-search", - "version": "27.3.1", + "version": "27.5.1", "systems": { "aarch64-darwin": { "outputs": [ { "name": "out", - "path": "/nix/store/n2l6y7zp830kasbw0xirfhqliniln54l-docker-27.3.1", + "path": "/nix/store/r3bnfs9i0fs8f7zvws56ff7jvxa7l6dw-docker-27.5.1", "default": true } ], - "store_path": "/nix/store/n2l6y7zp830kasbw0xirfhqliniln54l-docker-27.3.1" + "store_path": "/nix/store/r3bnfs9i0fs8f7zvws56ff7jvxa7l6dw-docker-27.5.1" }, "aarch64-linux": { "outputs": [ { "name": "out", - "path": 
"/nix/store/30w5k9rjzsjhscahps94d0bhd7f57pv8-docker-27.3.1", + "path": "/nix/store/g44x3mhdkwiqlxyxynwc28qzl0d7mcx2-docker-27.5.1", "default": true } ], - "store_path": "/nix/store/30w5k9rjzsjhscahps94d0bhd7f57pv8-docker-27.3.1" + "store_path": "/nix/store/g44x3mhdkwiqlxyxynwc28qzl0d7mcx2-docker-27.5.1" }, "x86_64-darwin": { "outputs": [ { "name": "out", - "path": "/nix/store/l4cfzw1bvvcqn0s1yyvc2pxmjz17mymv-docker-27.3.1", + "path": "/nix/store/w8xv8gg4vbcdavb54aji5jna0ljl25z3-docker-27.5.1", "default": true } ], - "store_path": "/nix/store/l4cfzw1bvvcqn0s1yyvc2pxmjz17mymv-docker-27.3.1" + "store_path": "/nix/store/w8xv8gg4vbcdavb54aji5jna0ljl25z3-docker-27.5.1" }, "x86_64-linux": { "outputs": [ { "name": "out", - "path": "/nix/store/267rdap7pn4wg03q2akrm5lx9xsls6rk-docker-27.3.1", + "path": "/nix/store/pkc753xpjd6kycn8q637d6s7x2aqs1a4-docker-27.5.1", "default": true } ], - "store_path": "/nix/store/267rdap7pn4wg03q2akrm5lx9xsls6rk-docker-27.3.1" + "store_path": "/nix/store/pkc753xpjd6kycn8q637d6s7x2aqs1a4-docker-27.5.1" } } }, "envsubst@latest": { - "last_modified": "2024-11-03T14:18:04Z", - "resolved": "github:NixOS/nixpkgs/4ae2e647537bcdbb82265469442713d066675275#envsubst", + "last_modified": "2025-03-13T11:38:39Z", + "resolved": "github:NixOS/nixpkgs/573c650e8a14b2faa0041645ab18aed7e60f0c9a#envsubst", "source": "devbox-search", - "version": "1.4.2", + "version": "1.4.3", "systems": { "aarch64-darwin": { "outputs": [ { "name": "out", - "path": "/nix/store/76jp5bcfhf17znkaffqsfqdr0p1gz8cx-envsubst-1.4.2", + "path": "/nix/store/7mjq4k1cml69mlzs6s8angvrlmimg46l-envsubst-1.4.3", "default": true } ], - "store_path": "/nix/store/76jp5bcfhf17znkaffqsfqdr0p1gz8cx-envsubst-1.4.2" + "store_path": "/nix/store/7mjq4k1cml69mlzs6s8angvrlmimg46l-envsubst-1.4.3" }, "aarch64-linux": { "outputs": [ { "name": "out", - "path": "/nix/store/c3cilncva8s3x4cmpfv29jsp1ypj35p4-envsubst-1.4.2", + "path": "/nix/store/525gfxbch1hcbm3dayxhjbi6pz8wbjgn-envsubst-1.4.3", "default": true } ], - "store_path": "/nix/store/c3cilncva8s3x4cmpfv29jsp1ypj35p4-envsubst-1.4.2" + "store_path": "/nix/store/525gfxbch1hcbm3dayxhjbi6pz8wbjgn-envsubst-1.4.3" }, "x86_64-darwin": { "outputs": [ { "name": "out", - "path": "/nix/store/ix7y1xqrnm1gs23y0ylspi91m7490qiq-envsubst-1.4.2", + "path": "/nix/store/1kv0wdd1cpgcixhqcjl9pk426fin7r91-envsubst-1.4.3", "default": true } ], - "store_path": "/nix/store/ix7y1xqrnm1gs23y0ylspi91m7490qiq-envsubst-1.4.2" + "store_path": "/nix/store/1kv0wdd1cpgcixhqcjl9pk426fin7r91-envsubst-1.4.3" }, "x86_64-linux": { "outputs": [ { "name": "out", - "path": "/nix/store/ylvk3rs98ssd24qkpxc04mji18magc9f-envsubst-1.4.2", + "path": "/nix/store/mrd7mba205x134jq7ghcmi19rbr8clxj-envsubst-1.4.3", "default": true } ], - "store_path": "/nix/store/ylvk3rs98ssd24qkpxc04mji18magc9f-envsubst-1.4.2" + "store_path": "/nix/store/mrd7mba205x134jq7ghcmi19rbr8clxj-envsubst-1.4.3" } } }, - "go@1.23.3": { - "last_modified": "2024-11-28T07:51:56Z", - "resolved": "github:NixOS/nixpkgs/226216574ada4c3ecefcbbec41f39ce4655f78ef#go", + "go@1.24.1": { + "last_modified": "2025-03-11T17:52:14Z", + "resolved": "github:NixOS/nixpkgs/0d534853a55b5d02a4ababa1d71921ce8f0aee4c#go", "source": "devbox-search", - "version": "1.23.3", + "version": "1.24.1", "systems": { "aarch64-darwin": { "outputs": [ { "name": "out", - "path": "/nix/store/qrj2wp6vzfpjfrrlcmr22818zg83fb73-go-1.23.3", + "path": "/nix/store/ja4jxx60lh1qfqfl4z4p2rff56ia1c3c-go-1.24.1", "default": true } ], - "store_path": "/nix/store/qrj2wp6vzfpjfrrlcmr22818zg83fb73-go-1.23.3" + 
"store_path": "/nix/store/ja4jxx60lh1qfqfl4z4p2rff56ia1c3c-go-1.24.1" }, "aarch64-linux": { "outputs": [ { "name": "out", - "path": "/nix/store/dm66qyl19skrwcmk4rb9xcs64xc1d071-go-1.23.3", + "path": "/nix/store/8ply43gnxk1xwichr81mpgbjcd9a1y5w-go-1.24.1", "default": true } ], - "store_path": "/nix/store/dm66qyl19skrwcmk4rb9xcs64xc1d071-go-1.23.3" + "store_path": "/nix/store/8ply43gnxk1xwichr81mpgbjcd9a1y5w-go-1.24.1" }, "x86_64-darwin": { "outputs": [ { "name": "out", - "path": "/nix/store/vkjn6njpz4gy5ma763vh8hh93bgjwycr-go-1.23.3", + "path": "/nix/store/87yxrfx5lh78bdz393i33cr5z23x06q4-go-1.24.1", "default": true } ], - "store_path": "/nix/store/vkjn6njpz4gy5ma763vh8hh93bgjwycr-go-1.23.3" + "store_path": "/nix/store/87yxrfx5lh78bdz393i33cr5z23x06q4-go-1.24.1" }, "x86_64-linux": { "outputs": [ { "name": "out", - "path": "/nix/store/bavnchxi7v6xs077jxv7fl5rrqc3y87w-go-1.23.3", + "path": "/nix/store/cfjhl0kn7xc65466pha9fkrvigw3g72n-go-1.24.1", "default": true } ], - "store_path": "/nix/store/bavnchxi7v6xs077jxv7fl5rrqc3y87w-go-1.23.3" + "store_path": "/nix/store/cfjhl0kn7xc65466pha9fkrvigw3g72n-go-1.24.1" } } }, "golangci-lint@latest": { - "last_modified": "2024-11-03T14:18:04Z", - "resolved": "github:NixOS/nixpkgs/4ae2e647537bcdbb82265469442713d066675275#golangci-lint", + "last_modified": "2025-03-11T17:52:14Z", + "resolved": "github:NixOS/nixpkgs/0d534853a55b5d02a4ababa1d71921ce8f0aee4c#golangci-lint", "source": "devbox-search", - "version": "1.61.0", + "version": "1.64.6", "systems": { "aarch64-darwin": { "outputs": [ { "name": "out", - "path": "/nix/store/vm7syji08qh6q1s7ckd777p7kcjflx9b-golangci-lint-1.61.0", + "path": "/nix/store/w7v0fzxk2y99h5h92rbbn5q4cg3khsk7-golangci-lint-1.64.6", "default": true } ], - "store_path": "/nix/store/vm7syji08qh6q1s7ckd777p7kcjflx9b-golangci-lint-1.61.0" + "store_path": "/nix/store/w7v0fzxk2y99h5h92rbbn5q4cg3khsk7-golangci-lint-1.64.6" }, "aarch64-linux": { "outputs": [ { "name": "out", - "path": "/nix/store/6vx22sm4x9lmyqswq7svmih0q68c92lg-golangci-lint-1.61.0", + "path": "/nix/store/945ji0smgfq62ngq8hyp987khy1194c0-golangci-lint-1.64.6", "default": true } ], - "store_path": "/nix/store/6vx22sm4x9lmyqswq7svmih0q68c92lg-golangci-lint-1.61.0" + "store_path": "/nix/store/945ji0smgfq62ngq8hyp987khy1194c0-golangci-lint-1.64.6" }, "x86_64-darwin": { "outputs": [ { "name": "out", - "path": "/nix/store/ipn5pi90mallx4d4c923h3rc7bpmiwz9-golangci-lint-1.61.0", + "path": "/nix/store/gvr1ngpwj9azzayxjvbiczflh75njk3j-golangci-lint-1.64.6", "default": true } ], - "store_path": "/nix/store/ipn5pi90mallx4d4c923h3rc7bpmiwz9-golangci-lint-1.61.0" + "store_path": "/nix/store/gvr1ngpwj9azzayxjvbiczflh75njk3j-golangci-lint-1.64.6" }, "x86_64-linux": { "outputs": [ { "name": "out", - "path": "/nix/store/bz2kxbkb9yxdkz2pdl640g32xyqxqd4c-golangci-lint-1.61.0", + "path": "/nix/store/0mmm8g5pwwk7j86zwb10x53bkdm0k174-golangci-lint-1.64.6", "default": true } ], - "store_path": "/nix/store/bz2kxbkb9yxdkz2pdl640g32xyqxqd4c-golangci-lint-1.61.0" + "store_path": "/nix/store/0mmm8g5pwwk7j86zwb10x53bkdm0k174-golangci-lint-1.64.6" } } }, "jq@latest": { - "last_modified": "2024-11-03T14:18:04Z", - "resolved": "github:NixOS/nixpkgs/4ae2e647537bcdbb82265469442713d066675275#jq", + "last_modified": "2025-03-13T11:38:39Z", + "resolved": "github:NixOS/nixpkgs/573c650e8a14b2faa0041645ab18aed7e60f0c9a#jq", "source": "devbox-search", "version": "1.7.1", "systems": { @@ -299,343 +299,343 @@ "outputs": [ { "name": "bin", - "path": "/nix/store/mx6zrpgk9ncxmdkriivad29g6ms54lp4-jq-1.7.1-bin", + 
"path": "/nix/store/a0d5735pbpxr817gjzvv6bdy2ycj38nf-jq-1.7.1-bin", "default": true }, { "name": "man", - "path": "/nix/store/13q2k3y8rabhsxknma0by8m9kvvdc5z4-jq-1.7.1-man", + "path": "/nix/store/id3vbyr5wqz7ljylcdqjvrpdsiv4a02v-jq-1.7.1-man", "default": true }, { "name": "dev", - "path": "/nix/store/3zyi01bjcf4f54khnnyhpbhg53g552mh-jq-1.7.1-dev" + "path": "/nix/store/yhykv5iafas7lp327w3xkj1gxkhifcbk-jq-1.7.1-dev" }, { "name": "doc", - "path": "/nix/store/njqci9px1wh3nd1k0w0rdizkj7dq38sz-jq-1.7.1-doc" + "path": "/nix/store/rqjr3mln9jmidha6izp07fq9fp30y4sx-jq-1.7.1-doc" }, { "name": "out", - "path": "/nix/store/g25q96a9y4m2y5v8acyyd16l4wml2haz-jq-1.7.1" + "path": "/nix/store/m0kqs2n6b4idvhpzcqad9jfz1cbwib2n-jq-1.7.1" } ], - "store_path": "/nix/store/mx6zrpgk9ncxmdkriivad29g6ms54lp4-jq-1.7.1-bin" + "store_path": "/nix/store/a0d5735pbpxr817gjzvv6bdy2ycj38nf-jq-1.7.1-bin" }, "aarch64-linux": { "outputs": [ { "name": "bin", - "path": "/nix/store/m749l3lg9kmld916656p0b4mb9p9i62y-jq-1.7.1-bin", + "path": "/nix/store/64k31kawylkwbjh4r1bwxfq8zb0yzqyi-jq-1.7.1-bin", "default": true }, { "name": "man", - "path": "/nix/store/061h9pv30awg36fjfhzsw5a1bh37zcdr-jq-1.7.1-man", + "path": "/nix/store/w29k8y3v6rfyy4kvx95dc79lyfcq7ikf-jq-1.7.1-man", "default": true }, - { - "name": "dev", - "path": "/nix/store/4y9x9d4w7np0vby58glizzaf5p55g4ak-jq-1.7.1-dev" - }, { "name": "doc", - "path": "/nix/store/xlpqc0zdgbi2wg0rxippj4jp7wgbqbmk-jq-1.7.1-doc" + "path": "/nix/store/mhfyqmwvnqihqs5av778m4lic0p15hyf-jq-1.7.1-doc" }, { "name": "out", - "path": "/nix/store/ar5glhxq1x82ngnd6cni4wpfdfd06kdz-jq-1.7.1" + "path": "/nix/store/y3sdsgvnpmwp42d2kd9rq3d5anraccpy-jq-1.7.1" + }, + { + "name": "dev", + "path": "/nix/store/r5wikmfi7na32gcx46p3ncxmzns1f5xz-jq-1.7.1-dev" } ], - "store_path": "/nix/store/m749l3lg9kmld916656p0b4mb9p9i62y-jq-1.7.1-bin" + "store_path": "/nix/store/64k31kawylkwbjh4r1bwxfq8zb0yzqyi-jq-1.7.1-bin" }, "x86_64-darwin": { "outputs": [ { "name": "bin", - "path": "/nix/store/91chd95c04083fxabvjbvpnaxizji71d-jq-1.7.1-bin", + "path": "/nix/store/p6ph78v4kf373vnsigwaqh7j3dlapnny-jq-1.7.1-bin", "default": true }, { "name": "man", - "path": "/nix/store/2xgr0bm2dcn64jxyh7v8jg8ygdpy6g50-jq-1.7.1-man", + "path": "/nix/store/83f3x5240k5kqc8bw758nq4q6d3j7l5p-jq-1.7.1-man", "default": true }, { - "name": "out", - "path": "/nix/store/wnxi834pnbfl9lz2ahx62z6ivsl3d3ns-jq-1.7.1" + "name": "doc", + "path": "/nix/store/5md80wrj52gklrxwsf68dkqc9igmq928-jq-1.7.1-doc" }, { - "name": "dev", - "path": "/nix/store/39kp2v0mkw82jkggmwlpyv0pzkav2z2y-jq-1.7.1-dev" + "name": "out", + "path": "/nix/store/fbv7k9csv3m2af8chfblxzlv2mgaz8i0-jq-1.7.1" }, { - "name": "doc", - "path": "/nix/store/d53nra4g8m3lfg4zqrmjmmp2b7cfbilx-jq-1.7.1-doc" + "name": "dev", + "path": "/nix/store/6lhl23njgh2pjh3sypxv4s0g6n0zdmqk-jq-1.7.1-dev" } ], - "store_path": "/nix/store/91chd95c04083fxabvjbvpnaxizji71d-jq-1.7.1-bin" + "store_path": "/nix/store/p6ph78v4kf373vnsigwaqh7j3dlapnny-jq-1.7.1-bin" }, "x86_64-linux": { "outputs": [ { "name": "bin", - "path": "/nix/store/wj603ds3b3gdwsrlx4nzcg4v3ba2298b-jq-1.7.1-bin", + "path": "/nix/store/134m2q047vsr9miwh5l227j7sh9jb130-jq-1.7.1-bin", "default": true }, { "name": "man", - "path": "/nix/store/yiwlz5r6vlb6g32fczyb6zghnrizv3mq-jq-1.7.1-man", + "path": "/nix/store/8vlqxykb9959nn5bl5vph99d2c4np9wg-jq-1.7.1-man", "default": true }, - { - "name": "dev", - "path": "/nix/store/rlxn658k96prpc4xhrzld4jwjqvkb2bz-jq-1.7.1-dev" - }, { "name": "doc", - "path": "/nix/store/d0cwkm74mp1mqbf3bsdkbyx94byipyzp-jq-1.7.1-doc" + 
"path": "/nix/store/byqqs2ayp67zvmd6dqi4jahwm1f779cj-jq-1.7.1-doc" }, { "name": "out", - "path": "/nix/store/3nsnyac45i07pfgjw5bn1kpbwaxphm70-jq-1.7.1" + "path": "/nix/store/6zy1hkwlwq9r97swjy616vf096dccrsr-jq-1.7.1" + }, + { + "name": "dev", + "path": "/nix/store/94yz5x26ssx2pqn7gamlngp306s8mz60-jq-1.7.1-dev" } ], - "store_path": "/nix/store/wj603ds3b3gdwsrlx4nzcg4v3ba2298b-jq-1.7.1-bin" + "store_path": "/nix/store/134m2q047vsr9miwh5l227j7sh9jb130-jq-1.7.1-bin" } } }, "kind@latest": { - "last_modified": "2024-11-03T14:18:04Z", - "resolved": "github:NixOS/nixpkgs/4ae2e647537bcdbb82265469442713d066675275#kind", + "last_modified": "2025-03-11T17:52:14Z", + "resolved": "github:NixOS/nixpkgs/0d534853a55b5d02a4ababa1d71921ce8f0aee4c#kind", "source": "devbox-search", - "version": "0.24.0", + "version": "0.27.0", "systems": { "aarch64-darwin": { "outputs": [ { "name": "out", - "path": "/nix/store/ipy4vca55lan1szk9h9g7n3mrnm1lpza-kind-0.24.0", + "path": "/nix/store/6pdsjczw3jrxfdms3vavbrszdn03z18h-kind-0.27.0", "default": true } ], - "store_path": "/nix/store/ipy4vca55lan1szk9h9g7n3mrnm1lpza-kind-0.24.0" + "store_path": "/nix/store/6pdsjczw3jrxfdms3vavbrszdn03z18h-kind-0.27.0" }, "aarch64-linux": { "outputs": [ { "name": "out", - "path": "/nix/store/bx135jsrpwq43d4hnl2c6v6qph5685zk-kind-0.24.0", + "path": "/nix/store/a801y50y156y9wjywm0jc1yc101932h7-kind-0.27.0", "default": true } ], - "store_path": "/nix/store/bx135jsrpwq43d4hnl2c6v6qph5685zk-kind-0.24.0" + "store_path": "/nix/store/a801y50y156y9wjywm0jc1yc101932h7-kind-0.27.0" }, "x86_64-darwin": { "outputs": [ { "name": "out", - "path": "/nix/store/6g7iymghysai09dlhrddgifb2sf4zb50-kind-0.24.0", + "path": "/nix/store/s3q5xzg0hhbi23l424gwm91823qawlhz-kind-0.27.0", "default": true } ], - "store_path": "/nix/store/6g7iymghysai09dlhrddgifb2sf4zb50-kind-0.24.0" + "store_path": "/nix/store/s3q5xzg0hhbi23l424gwm91823qawlhz-kind-0.27.0" }, "x86_64-linux": { "outputs": [ { "name": "out", - "path": "/nix/store/kpykmx58v8p2rddp4p1cqy24c5ym7a3z-kind-0.24.0", + "path": "/nix/store/l1bwqbahw30drpmivzivl9w47c8gfj1c-kind-0.27.0", "default": true } ], - "store_path": "/nix/store/kpykmx58v8p2rddp4p1cqy24c5ym7a3z-kind-0.24.0" + "store_path": "/nix/store/l1bwqbahw30drpmivzivl9w47c8gfj1c-kind-0.27.0" } } }, "kubectl@latest": { - "last_modified": "2024-11-03T14:18:04Z", - "resolved": "github:NixOS/nixpkgs/4ae2e647537bcdbb82265469442713d066675275#kubectl", + "last_modified": "2025-03-11T17:52:14Z", + "resolved": "github:NixOS/nixpkgs/0d534853a55b5d02a4ababa1d71921ce8f0aee4c#kubectl", "source": "devbox-search", - "version": "1.31.2", + "version": "1.32.2", "systems": { "aarch64-darwin": { "outputs": [ { "name": "out", - "path": "/nix/store/hk0qrz3w3ivibz67fjah61gpq5dfpj7n-kubectl-1.31.2", + "path": "/nix/store/cr69x3lwm2bvicn21xijyswvvfvckb78-kubectl-1.32.2", "default": true }, { "name": "man", - "path": "/nix/store/k2dwhk2hdhdp7vbliij1jgrfm0rvj57c-kubectl-1.31.2-man", + "path": "/nix/store/h827jr60kqihjv4kgasfibhsrg4hbxmd-kubectl-1.32.2-man", "default": true }, { "name": "convert", - "path": "/nix/store/90nf3rw5h92bzafsf24s2ijfwfbbglvy-kubectl-1.31.2-convert" + "path": "/nix/store/554dpvy977n9871yvc21nqhsnqkdg5fr-kubectl-1.32.2-convert" } ], - "store_path": "/nix/store/hk0qrz3w3ivibz67fjah61gpq5dfpj7n-kubectl-1.31.2" + "store_path": "/nix/store/cr69x3lwm2bvicn21xijyswvvfvckb78-kubectl-1.32.2" }, "aarch64-linux": { "outputs": [ { "name": "out", - "path": "/nix/store/z3mlcpb4gd4n1c7c2ly7fz2j65zkcv3n-kubectl-1.31.2", + "path": 
"/nix/store/ln7cfszvc6gjjrarbv0ipf1h8yzgkl46-kubectl-1.32.2", "default": true }, { "name": "man", - "path": "/nix/store/6wc7cni53c0g9162z281qqmflfpp3vq7-kubectl-1.31.2-man", + "path": "/nix/store/m79ivh1p7158mycrxiqslsdwqz0a2y15-kubectl-1.32.2-man", "default": true }, { "name": "convert", - "path": "/nix/store/kbkblm912v1lgrmqvg187kviwxfg3ywr-kubectl-1.31.2-convert" + "path": "/nix/store/x9c450w9l5l9hdb4sqvysi6wlsg522x7-kubectl-1.32.2-convert" } ], - "store_path": "/nix/store/z3mlcpb4gd4n1c7c2ly7fz2j65zkcv3n-kubectl-1.31.2" + "store_path": "/nix/store/ln7cfszvc6gjjrarbv0ipf1h8yzgkl46-kubectl-1.32.2" }, "x86_64-darwin": { "outputs": [ { "name": "out", - "path": "/nix/store/bgq5fk947zf52ys0izx4k4m7pwq77pri-kubectl-1.31.2", + "path": "/nix/store/i31rmk5c0cj83i961kamvnz6wns61rr5-kubectl-1.32.2", "default": true }, { "name": "man", - "path": "/nix/store/xija3wpdm6jmkmlfd0y6d49vgg3098lw-kubectl-1.31.2-man", + "path": "/nix/store/bxxybbhglqz5dcrqfziil75kfp0xgbsf-kubectl-1.32.2-man", "default": true }, { "name": "convert", - "path": "/nix/store/g49s8ahgcsm2m5azd09ql7434mdzif33-kubectl-1.31.2-convert" + "path": "/nix/store/4pma212svdc8gvicxrs9dwv1d070jz6x-kubectl-1.32.2-convert" } ], - "store_path": "/nix/store/bgq5fk947zf52ys0izx4k4m7pwq77pri-kubectl-1.31.2" + "store_path": "/nix/store/i31rmk5c0cj83i961kamvnz6wns61rr5-kubectl-1.32.2" }, "x86_64-linux": { "outputs": [ { "name": "out", - "path": "/nix/store/nqkn7vjqgcnp834vg0dwff4rj30v9i65-kubectl-1.31.2", + "path": "/nix/store/z71m7l32ll43nb5cpfs5fb3acaz3rpx2-kubectl-1.32.2", "default": true }, { "name": "man", - "path": "/nix/store/zfm38523vg5frylms8klxsi8jyqh374i-kubectl-1.31.2-man", + "path": "/nix/store/0nwm2ksngri57gvgn2vvpcvy2gcz79bb-kubectl-1.32.2-man", "default": true }, { "name": "convert", - "path": "/nix/store/0yj1raiv1zddfarndmgrgmd7p27cbq6m-kubectl-1.31.2-convert" + "path": "/nix/store/52cf0bmr4pqpid2nid4l1b9i1zr8zkgn-kubectl-1.32.2-convert" } ], - "store_path": "/nix/store/nqkn7vjqgcnp834vg0dwff4rj30v9i65-kubectl-1.31.2" + "store_path": "/nix/store/z71m7l32ll43nb5cpfs5fb3acaz3rpx2-kubectl-1.32.2" } } }, "kustomize@latest": { - "last_modified": "2024-11-03T14:18:04Z", - "resolved": "github:NixOS/nixpkgs/4ae2e647537bcdbb82265469442713d066675275#kustomize", + "last_modified": "2025-03-11T17:52:14Z", + "resolved": "github:NixOS/nixpkgs/0d534853a55b5d02a4ababa1d71921ce8f0aee4c#kustomize", "source": "devbox-search", - "version": "5.5.0", + "version": "5.6.0", "systems": { "aarch64-darwin": { "outputs": [ { "name": "out", - "path": "/nix/store/64vp26c9px3fzsm2ffvx2wvs8cybzbdm-kustomize-5.5.0", + "path": "/nix/store/psv0pkib82866f7062hrmmmwc2i0csbf-kustomize-5.6.0", "default": true } ], - "store_path": "/nix/store/64vp26c9px3fzsm2ffvx2wvs8cybzbdm-kustomize-5.5.0" + "store_path": "/nix/store/psv0pkib82866f7062hrmmmwc2i0csbf-kustomize-5.6.0" }, "aarch64-linux": { "outputs": [ { "name": "out", - "path": "/nix/store/vg2hwhhs52vbbm215rb8vak8hbf86rq6-kustomize-5.5.0", + "path": "/nix/store/xa8s3a4fafwl36a5kb2xwwph8w36rw6y-kustomize-5.6.0", "default": true } ], - "store_path": "/nix/store/vg2hwhhs52vbbm215rb8vak8hbf86rq6-kustomize-5.5.0" + "store_path": "/nix/store/xa8s3a4fafwl36a5kb2xwwph8w36rw6y-kustomize-5.6.0" }, "x86_64-darwin": { "outputs": [ { "name": "out", - "path": "/nix/store/98hqbf6yr88bdq98axpr2b2894q380fc-kustomize-5.5.0", + "path": "/nix/store/hbn2n3x511s7v7cnn5rw6ln01yk2syp6-kustomize-5.6.0", "default": true } ], - "store_path": "/nix/store/98hqbf6yr88bdq98axpr2b2894q380fc-kustomize-5.5.0" + "store_path": 
"/nix/store/hbn2n3x511s7v7cnn5rw6ln01yk2syp6-kustomize-5.6.0" }, "x86_64-linux": { "outputs": [ { "name": "out", - "path": "/nix/store/wqy1ckdjjy07mngl62dxhfkcpriv3j3s-kustomize-5.5.0", + "path": "/nix/store/cwclm6315x1cn2kswzfhfcqp13qf44b0-kustomize-5.6.0", "default": true } ], - "store_path": "/nix/store/wqy1ckdjjy07mngl62dxhfkcpriv3j3s-kustomize-5.5.0" + "store_path": "/nix/store/cwclm6315x1cn2kswzfhfcqp13qf44b0-kustomize-5.6.0" } } }, "kyverno-chainsaw@latest": { - "last_modified": "2024-11-05T18:23:38Z", - "resolved": "github:NixOS/nixpkgs/8c4dc69b9732f6bbe826b5fbb32184987520ff26#kyverno-chainsaw", + "last_modified": "2025-02-23T09:42:26Z", + "resolved": "github:NixOS/nixpkgs/2d068ae5c6516b2d04562de50a58c682540de9bf#kyverno-chainsaw", "source": "devbox-search", - "version": "0.2.11", + "version": "0.2.12", "systems": { "aarch64-darwin": { "outputs": [ { "name": "out", - "path": "/nix/store/2dfq7856a9i2vxbw3bc3dhq6ad505hxz-kyverno-chainsaw-0.2.11", + "path": "/nix/store/xq8y9rk2mv3g0bh5j12v5qwwavc60cbq-kyverno-chainsaw-0.2.12", "default": true } ], - "store_path": "/nix/store/2dfq7856a9i2vxbw3bc3dhq6ad505hxz-kyverno-chainsaw-0.2.11" + "store_path": "/nix/store/xq8y9rk2mv3g0bh5j12v5qwwavc60cbq-kyverno-chainsaw-0.2.12" }, "aarch64-linux": { "outputs": [ { "name": "out", - "path": "/nix/store/9d4ddygkw9sdvv38lgk8p68pnkmpw1dk-kyverno-chainsaw-0.2.11", + "path": "/nix/store/awgv9ca78l625y31gm5jib535dkg6lv6-kyverno-chainsaw-0.2.12", "default": true } ], - "store_path": "/nix/store/9d4ddygkw9sdvv38lgk8p68pnkmpw1dk-kyverno-chainsaw-0.2.11" + "store_path": "/nix/store/awgv9ca78l625y31gm5jib535dkg6lv6-kyverno-chainsaw-0.2.12" }, "x86_64-darwin": { "outputs": [ { "name": "out", - "path": "/nix/store/mmsixz2pm6gwrnrlhbgrjq78891gdrxq-kyverno-chainsaw-0.2.11", + "path": "/nix/store/i6s9gmqrfyp4mnn6qqwchh9lk832xf3b-kyverno-chainsaw-0.2.12", "default": true } ], - "store_path": "/nix/store/mmsixz2pm6gwrnrlhbgrjq78891gdrxq-kyverno-chainsaw-0.2.11" + "store_path": "/nix/store/i6s9gmqrfyp4mnn6qqwchh9lk832xf3b-kyverno-chainsaw-0.2.12" }, "x86_64-linux": { "outputs": [ { "name": "out", - "path": "/nix/store/2dw8y9zfs6ri4wnram125pgqgym0q29d-kyverno-chainsaw-0.2.11", + "path": "/nix/store/7fhp7vnvchql72za4vz40cz0a53zcz75-kyverno-chainsaw-0.2.12", "default": true } ], - "store_path": "/nix/store/2dw8y9zfs6ri4wnram125pgqgym0q29d-kyverno-chainsaw-0.2.11" + "store_path": "/nix/store/7fhp7vnvchql72za4vz40cz0a53zcz75-kyverno-chainsaw-0.2.12" } } }, "mockgen@latest": { - "last_modified": "2024-11-03T14:18:04Z", - "resolved": "github:NixOS/nixpkgs/4ae2e647537bcdbb82265469442713d066675275#mockgen", + "last_modified": "2025-03-11T17:52:14Z", + "resolved": "github:NixOS/nixpkgs/0d534853a55b5d02a4ababa1d71921ce8f0aee4c#mockgen", "source": "devbox-search", "version": "0.5.0", "systems": { @@ -643,89 +643,89 @@ "outputs": [ { "name": "out", - "path": "/nix/store/xw7pgrlkpwrvxzin1k0fs8da63nacnpa-mockgen-0.5.0", + "path": "/nix/store/ggiv2g1d7g93qv4hnggb1i4vysc588ls-mockgen-0.5.0", "default": true } ], - "store_path": "/nix/store/xw7pgrlkpwrvxzin1k0fs8da63nacnpa-mockgen-0.5.0" + "store_path": "/nix/store/ggiv2g1d7g93qv4hnggb1i4vysc588ls-mockgen-0.5.0" }, "aarch64-linux": { "outputs": [ { "name": "out", - "path": "/nix/store/lrmd1ahi1sfcc518wjp6sbqmmbfic9vy-mockgen-0.5.0", + "path": "/nix/store/m2pqzcvhgi3n67zy4kbbsxprb4zwrg80-mockgen-0.5.0", "default": true } ], - "store_path": "/nix/store/lrmd1ahi1sfcc518wjp6sbqmmbfic9vy-mockgen-0.5.0" + "store_path": "/nix/store/m2pqzcvhgi3n67zy4kbbsxprb4zwrg80-mockgen-0.5.0" }, 
"x86_64-darwin": { "outputs": [ { "name": "out", - "path": "/nix/store/i5471z9sakpqvx4s9m5kjk4xhpn22hwn-mockgen-0.5.0", + "path": "/nix/store/dvg2ic642cv35k3bm6qyd9azhdlnp1cg-mockgen-0.5.0", "default": true } ], - "store_path": "/nix/store/i5471z9sakpqvx4s9m5kjk4xhpn22hwn-mockgen-0.5.0" + "store_path": "/nix/store/dvg2ic642cv35k3bm6qyd9azhdlnp1cg-mockgen-0.5.0" }, "x86_64-linux": { "outputs": [ { "name": "out", - "path": "/nix/store/0gamxw6355qv6b9hpzwkfsnfsvhc60f9-mockgen-0.5.0", + "path": "/nix/store/zj8d9ppk5srx2d8jq0jzi9ln97r4zv3p-mockgen-0.5.0", "default": true } ], - "store_path": "/nix/store/0gamxw6355qv6b9hpzwkfsnfsvhc60f9-mockgen-0.5.0" + "store_path": "/nix/store/zj8d9ppk5srx2d8jq0jzi9ln97r4zv3p-mockgen-0.5.0" } } }, "yq-go@latest": { - "last_modified": "2024-11-03T14:18:04Z", - "resolved": "github:NixOS/nixpkgs/4ae2e647537bcdbb82265469442713d066675275#yq-go", + "last_modified": "2025-03-11T17:52:14Z", + "resolved": "github:NixOS/nixpkgs/0d534853a55b5d02a4ababa1d71921ce8f0aee4c#yq-go", "source": "devbox-search", - "version": "4.44.3", + "version": "4.45.1", "systems": { "aarch64-darwin": { "outputs": [ { "name": "out", - "path": "/nix/store/nypcsysgi0n88m3laa0yh94304d2k4gl-yq-go-4.44.3", + "path": "/nix/store/m92kn50px5czhrjmcd9vq674bpkg3ykj-yq-go-4.45.1", "default": true } ], - "store_path": "/nix/store/nypcsysgi0n88m3laa0yh94304d2k4gl-yq-go-4.44.3" + "store_path": "/nix/store/m92kn50px5czhrjmcd9vq674bpkg3ykj-yq-go-4.45.1" }, "aarch64-linux": { "outputs": [ { "name": "out", - "path": "/nix/store/av2h5x1i6dcg55s9a7cq11maxjsqbmq5-yq-go-4.44.3", + "path": "/nix/store/kn5sgwgs94gnsifzlcysb2xgiyvm3aqz-yq-go-4.45.1", "default": true } ], - "store_path": "/nix/store/av2h5x1i6dcg55s9a7cq11maxjsqbmq5-yq-go-4.44.3" + "store_path": "/nix/store/kn5sgwgs94gnsifzlcysb2xgiyvm3aqz-yq-go-4.45.1" }, "x86_64-darwin": { "outputs": [ { "name": "out", - "path": "/nix/store/3gw1mw88j7w5xj2lkvfkqzya5jxhhn82-yq-go-4.44.3", + "path": "/nix/store/naa1j0ps9v9d51xf63j3r1y4rmslzyzx-yq-go-4.45.1", "default": true } ], - "store_path": "/nix/store/3gw1mw88j7w5xj2lkvfkqzya5jxhhn82-yq-go-4.44.3" + "store_path": "/nix/store/naa1j0ps9v9d51xf63j3r1y4rmslzyzx-yq-go-4.45.1" }, "x86_64-linux": { "outputs": [ { "name": "out", - "path": "/nix/store/g4989ys9ngzld6mfcn0frfb82w3jlhb3-yq-go-4.44.3", + "path": "/nix/store/r8lab8vdy98adpil6xvz716168dqx7kn-yq-go-4.45.1", "default": true } ], - "store_path": "/nix/store/g4989ys9ngzld6mfcn0frfb82w3jlhb3-yq-go-4.44.3" + "store_path": "/nix/store/r8lab8vdy98adpil6xvz716168dqx7kn-yq-go-4.45.1" } } } diff --git a/docs/configuration/README.md b/docs/configuration/README.md index 9d8f2531..b78a2bd3 100644 --- a/docs/configuration/README.md +++ b/docs/configuration/README.md @@ -28,12 +28,13 @@ The Linode Cloud Controller Manager (CCM) offers extensive configuration options - Node controller behavior - [See node management](nodes.md#node-controller-behavior) -4. **[Environment Variables](environment.md)** +4. **[Environment Variables and Flags](environment.md)** - Cache settings - API configuration - Network settings - BGP configuration - - [See environment reference](environment.md#available-variables) + - IPv6 configuration + - [See configuration reference](environment.md#flags) 5. 
**[Firewall Setup](firewall.md)** - CCM-managed firewalls diff --git a/docs/configuration/annotations.md b/docs/configuration/annotations.md index 185627a3..fb20b71c 100644 --- a/docs/configuration/annotations.md +++ b/docs/configuration/annotations.md @@ -32,6 +32,11 @@ For implementation details, see: | `tags` | string | | A comma separated list of tags to be applied to the NodeBalancer instance | | `firewall-id` | string | | An existing Cloud Firewall ID to be attached to the NodeBalancer instance. See [Firewall Setup](firewall.md) | | `firewall-acl` | string | | The Firewall rules to be applied to the NodeBalancer. See [Firewall Configuration](#firewall-configuration) | +| `nodebalancer-type` | string | | The type of NodeBalancer to create (options: common, premium). See [NodeBalancer Types](#nodebalancer-type) | +| `enable-ipv6-ingress` | bool | `false` | When `true`, both IPv4 and IPv6 addresses will be included in the LoadBalancerStatus ingress | +| `backend-ipv4-range` | string | | The IPv4 range from VPC subnet to be applied to the NodeBalancer backend. See [Nodebalancer VPC Configuration](#nodebalancer-vpc-configuration) | +| `backend-vpc-name` | string | | VPC which is connected to the NodeBalancer backend. See [Nodebalancer VPC Configuration](#nodebalancer-vpc-configuration) | +| `backend-subnet-name` | string | | Subnet within VPC which is connected to the NodeBalancer backend. See [Nodebalancer VPC Configuration](#nodebalancer-vpc-configuration) | ### Port Specific Configuration @@ -104,6 +109,30 @@ metadata: } ``` +### NodeBalancer Type +Linode supports NodeBalancers of two types: common and premium. By default, NodeBalancers of type common are provisioned. If your account is allowed to provision premium NodeBalancers and you want to use them, specify the annotation: +```yaml +metadata: + annotations: + service.beta.kubernetes.io/linode-loadbalancer-nodebalancer-type: premium +``` + +### Nodebalancer VPC Configuration +```yaml +metadata: + annotations: + service.beta.kubernetes.io/linode-loadbalancer-backend-ipv4-range: "10.100.0.0/30" + service.beta.kubernetes.io/linode-loadbalancer-vpc-name: "vpc1" + service.beta.kubernetes.io/linode-loadbalancer-subnet-name: "subnet1" +``` + +### Service with IPv6 Address +```yaml +metadata: + annotations: + service.beta.kubernetes.io/linode-loadbalancer-enable-ipv6-ingress: "true" +``` + For more examples and detailed configuration options, see: - [LoadBalancer Configuration](loadbalancer.md) - [Firewall Configuration](firewall.md) diff --git a/docs/configuration/environment.md b/docs/configuration/environment.md index 15ad47f0..9d3624de 100644 --- a/docs/configuration/environment.md +++ b/docs/configuration/environment.md @@ -1,10 +1,10 @@ -# Environment Variables +# Environment Variables and Flags ## Overview -Environment variables provide global configuration options for the CCM. These settings affect caching, API behavior, and networking configurations. +The CCM can be configured using environment variables and flags. Environment variables provide global configuration options, while flags control specific features. -## Available Variables +## Environment Variables ### Cache Configuration @@ -12,6 +12,8 @@ Environment variables provide global configuration options for the CCM.
These se |----------|---------|-------------| | `LINODE_INSTANCE_CACHE_TTL` | `15` | Default timeout of instance cache in seconds | | `LINODE_ROUTES_CACHE_TTL_SECONDS` | `60` | Default timeout of route cache in seconds | +| `LINODE_METADATA_TTL` | `300` | Default linode metadata timeout in seconds | +| `K8S_NODECACHE_TTL` | `300` | Default timeout of k8s node cache in seconds | ### API Configuration @@ -28,6 +30,25 @@ Environment variables provide global configuration options for the CCM. These se | `BGP_CUSTOM_ID_MAP` | "" | Use your own map instead of default region map for BGP | | `BGP_PEER_PREFIX` | `2600:3c0f` | Use your own BGP peer prefix instead of default one | +## Flags + +The CCM supports the following flags: + +| Flag | Default | Description | +|------|---------|-------------| +| `--linodego-debug` | `false` | Enables debug output for the LinodeAPI wrapper | +| `--enable-route-controller` | `false` | Enables route_controller for CCM | +| `--enable-token-health-checker` | `false` | Enables Linode API token health checker | +| `--vpc-names` | `""` | Comma separated VPC names whose routes will be managed by route-controller | +| `--subnet-names` | `""` | Comma separated subnet names whose routes will be managed by route-controller (requires vpc-names flag) | +| `--load-balancer-type` | `nodebalancer` | Configures which type of load-balancing to use (options: nodebalancer, cilium-bgp) | +| `--bgp-node-selector` | `""` | Node selector to use to perform shared IP fail-over with BGP | +| `--ip-holder-suffix` | `""` | Suffix to append to the IP holder name when using shared IP fail-over with BGP | +| `--default-nodebalancer-type` | `common` | Default type of NodeBalancer to create (options: common, premium) | +| `--nodebalancer-tags` | `[]` | Linode tags to apply to all NodeBalancers | +| `--nodebalancer-backend-ipv4-subnet` | `""` | ipv4 subnet to use for NodeBalancer backends | +| `--enable-ipv6-for-loadbalancers` | `false` | Set both IPv4 and IPv6 addresses for all LoadBalancer services (when disabled, only IPv4 is used). This can also be configured per-service using the `service.beta.kubernetes.io/linode-loadbalancer-enable-ipv6-ingress` annotation. | + ## Configuration Methods ### Helm Chart @@ -36,6 +57,9 @@ Configure via `values.yaml`: env: - name: LINODE_INSTANCE_CACHE_TTL value: "30" +args: + - --enable-ipv6-for-loadbalancers + - --enable-route-controller ``` ### Manual Deployment @@ -49,6 +73,9 @@ spec: env: - name: LINODE_INSTANCE_CACHE_TTL value: "30" + args: + - --enable-ipv6-for-loadbalancers + - --enable-route-controller ``` ## Usage Guidelines diff --git a/docs/configuration/loadbalancer.md b/docs/configuration/loadbalancer.md index c0781fe2..b7117685 100644 --- a/docs/configuration/loadbalancer.md +++ b/docs/configuration/loadbalancer.md @@ -18,6 +18,32 @@ When using NodeBalancers, the CCM automatically: For more details, see [Linode NodeBalancer Documentation](https://www.linode.com/docs/products/networking/nodebalancers/). +### IPv6 Support + +NodeBalancers support both IPv4 and IPv6 ingress addresses. By default, the CCM uses only IPv4 address for LoadBalancer services. 
+ +You can enable IPv6 addresses globally for all services by setting the `enable-ipv6-for-loadbalancers` flag: + +```yaml +spec: + template: + spec: + containers: + - name: ccm-linode + args: + - --enable-ipv6-for-loadbalancers=true +``` + +Alternatively, you can enable IPv6 addresses for individual services using the annotation: + +```yaml +metadata: + annotations: + service.beta.kubernetes.io/linode-loadbalancer-enable-ipv6-ingress: "true" +``` + +When IPv6 is enabled (either globally or per-service), both IPv4 and IPv6 addresses will be included in the service's LoadBalancer status. + ### Basic Configuration Create a LoadBalancer service: @@ -120,10 +146,10 @@ metadata: ## BGP-based IP Sharing Implementation -BGP-based IP sharing provides a more cost-effective solution for multiple LoadBalancer services. For detailed setup instructions, see [Cilium BGP Documentation](https://docs.cilium.io/en/stable/network/bgp-control-plane/). +BGP-based IP sharing provides a more cost-effective solution for multiple LoadBalancer services. For detailed setup instructions, see [Cilium BGP Documentation](https://docs.cilium.io/en/stable/network/bgp-control-plane/bgp-control-plane/). ### Prerequisites -- [Cilium CNI](https://docs.cilium.io/en/stable/network/bgp-control-plane/) with BGP control plane enabled +- [Cilium CNI](https://docs.cilium.io/en/stable/network/bgp-control-plane/bgp-control-plane/) with BGP control plane enabled - Additional IP provisioning enabled on your account (contact [Linode Support](https://www.linode.com/support/)) - Nodes labeled for BGP peering @@ -150,6 +176,29 @@ kubectl label node my-node cilium-bgp-peering=true For more details, see [Environment Variables](environment.md#network-configuration). +## Configuring NodeBalancers directly with VPC +NodeBalancers can be configured to use VPC-specific IPs as their backend nodes. This requires: +1. A VPC with a subnet, and Linodes attached to that subnet +2. A free /30 or larger range, taken from the subnet the Linodes are connected to, for each NodeBalancer created within that VPC + +Specify the NodeBalancer backend IPv4 range when creating the service: +```yaml +metadata: + annotations: + service.beta.kubernetes.io/linode-loadbalancer-backend-ipv4-range: "10.100.0.0/30" +``` + +By default, the CCM attaches NodeBalancers to the first VPC and subnet it is configured with. To override them, use: +```yaml +metadata: + annotations: + service.beta.kubernetes.io/linode-loadbalancer-backend-ipv4-range: "10.100.0.4/30" + service.beta.kubernetes.io/linode-loadbalancer-vpc-name: "vpc1" + service.beta.kubernetes.io/linode-loadbalancer-subnet-name: "subnet1" +``` + +If the CCM is started with the `--nodebalancer-backend-ipv4-subnet` flag, it will refuse to provision a NodeBalancer unless the range specified in the service annotation lies within the subnet given by the flag. This prevents accidental overlap between NodeBalancer backend IPs and pod CIDRs.
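+
+For reference, a complete Service manifest combining these annotations might look like the following sketch. The VPC, subnet, and IPv4 range values here are illustrative placeholders and must match resources that already exist in your account:
+```yaml
+apiVersion: v1
+kind: Service
+metadata:
+  name: my-vpc-backed-service   # illustrative name
+  annotations:
+    service.beta.kubernetes.io/linode-loadbalancer-backend-ipv4-range: "10.100.0.4/30"
+    service.beta.kubernetes.io/linode-loadbalancer-vpc-name: "vpc1"
+    service.beta.kubernetes.io/linode-loadbalancer-subnet-name: "subnet1"
+spec:
+  type: LoadBalancer
+  selector:
+    app: my-app                  # illustrative selector; must match your pods
+  ports:
+    - port: 80
+      targetPort: 8080
+      protocol: TCP
+```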
+ ## Advanced Configuration ### Using Existing NodeBalancers @@ -198,9 +247,9 @@ metadata: - [Service Annotations](annotations.md) - [Firewall Configuration](firewall.md) - [Session Affinity](session-affinity.md) -- [Environment Variables](environment.md) +- [Environment Variables and Flags](environment.md) - [Route Configuration](routes.md) - [Linode NodeBalancer Documentation](https://www.linode.com/docs/products/networking/nodebalancers/) -- [Cilium BGP Documentation](https://docs.cilium.io/en/stable/network/bgp-control-plane/) +- [Cilium BGP Documentation](https://docs.cilium.io/en/stable/network/bgp-control-plane/bgp-control-plane/) - [Basic Service Examples](../examples/basic.md) - [Advanced Configuration Examples](../examples/advanced.md) diff --git a/docs/getting-started/helm-installation.md b/docs/getting-started/helm-installation.md index 449166e6..2395398c 100644 --- a/docs/getting-started/helm-installation.md +++ b/docs/getting-started/helm-installation.md @@ -25,6 +25,10 @@ routeController: clusterCIDR: "10.0.0.0/8" configureCloudRoutes: true +# Optional: Assign node internal IPs from VPCs without enabling route controller +# Not required if specified in routeController +vpcNames: "" # Comma separated VPC names + # Optional: Configure shared IP load balancing instead of NodeBalancers (requires Cilium CNI and BGP Control Plane enabled) sharedIPLoadBalancing: loadBalancerType: cilium-bgp diff --git a/e2e/bgp-test/lb-cilium-bgp/chainsaw-test.yaml b/e2e/bgp-test/lb-cilium-bgp/chainsaw-test.yaml index 979bcac6..4a0b081a 100644 --- a/e2e/bgp-test/lb-cilium-bgp/chainsaw-test.yaml +++ b/e2e/bgp-test/lb-cilium-bgp/chainsaw-test.yaml @@ -3,6 +3,8 @@ apiVersion: chainsaw.kyverno.io/v1alpha1 kind: Test metadata: name: cilium-bgp-test + labels: + all: spec: namespace: "cilium-bgp-test" steps: diff --git a/e2e/subnet-test/chainsaw-test.yaml b/e2e/subnet-test/chainsaw-test.yaml new file mode 100644 index 00000000..cd69d41f --- /dev/null +++ b/e2e/subnet-test/chainsaw-test.yaml @@ -0,0 +1,78 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + name: subnet-filtering-test + labels: + all: +spec: + bindings: + - name: fwname + value: (join('-', ['ccm-fwtest', env('CLUSTER_NAME')])) + namespace: "subnet-filtering-test" + steps: + - name: Check if the CCM for each cluster focus on their individual subnets + try: + - script: + content: | + set -e + + if [ -z "$FIRST_CONFIG" ] || [ -z "$SECOND_CONFIG" ] || [ -z "$LINODE_TOKEN" ]; then + echo "Error: FIRST_CONFIG, SECOND_CONFIG, and LINODE_TOKEN environment variables must be set" + exit 1 + fi + + # Iterate through both clusters + for config in "$FIRST_CONFIG" "$SECOND_CONFIG"; do + # Get all node names + nodes=$(KUBECONFIG=$config kubectl get nodes -o jsonpath='{.items[*].metadata.name}') + if [ -z "$nodes" ]; then + echo "Error: No nodes found in cluster" + exit 1 + fi + + # Process each node + for node in $nodes; do + echo "Checking node: $node" + + # Get pod CIDR and instance ID + pod_cidr=$(KUBECONFIG=$config kubectl get node "$node" -o jsonpath='{.spec.podCIDR}') + instance_id=$(KUBECONFIG=$config kubectl get node "$node" -o jsonpath='{.spec.providerID}' | sed 's/linode:\/\///') + + echo " Pod CIDR: $pod_cidr" + echo " Instance ID: $instance_id" + + # Get interface details for this config + interfaces=$(curl -s \ + -H "Authorization: Bearer $LINODE_TOKEN" \ + 
"https://api.linode.com/v4/linode/instances/$instance_id/configs" \ + | jq -r '.data[0].interfaces') + + # Check if pod CIDR is in the VPC interface IP ranges + if echo "$interfaces" | jq -e --arg cidr "$pod_cidr" '.[] | select(.purpose == "vpc") | .ip_ranges[] | select(. == $cidr)' > /dev/null; then + echo "Pod CIDR found in VPC interface configuration" + else + echo "Pod CIDR not found in VPC interface configuration" + echo "Current VPC interface configuration:" + echo "$interfaces" | jq '.[] | select(.purpose == "vpc")' + fi + + echo "---" + done + done + + # Grep logs of each cluster for IPs from the other cluster + echo "Checking logs of each CCM" + if ! [ $(KUBECONFIG=$FIRST_CONFIG kubectl logs daemonset/ccm-linode -n kube-system | grep "172.16" | wc -l) -eq 0 ]; then + echo "IP address from testing subnet found in logs of test cluster" + exit 1 + fi + + if ! [ $(KUBECONFIG=$SECOND_CONFIG kubectl logs daemonset/ccm-linode -n kube-system | grep "10.192" | wc -l) -eq 0 ]; then + echo "IP address from default subnet found in logs of second cluster" + exit 1 + fi + + check: + ($error == null): true + (contains($stdout, 'Pod CIDR not found in VPC interface configuration')): false diff --git a/e2e/test/ccm-resources/chainsaw-test.yaml b/e2e/test/ccm-resources/chainsaw-test.yaml new file mode 100644 index 00000000..462495b6 --- /dev/null +++ b/e2e/test/ccm-resources/chainsaw-test.yaml @@ -0,0 +1,13 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + name: check-ccm-resources + labels: + all: +spec: + steps: + - name: Check if CCM is deployed + try: + - assert: + file: ../assert-ccm-resources.yaml diff --git a/e2e/test/fw-use-specified-nb/chainsaw-test.yaml b/e2e/test/fw-use-specified-nb/chainsaw-test.yaml index fb0a0148..f95c4fc5 100644 --- a/e2e/test/fw-use-specified-nb/chainsaw-test.yaml +++ b/e2e/test/fw-use-specified-nb/chainsaw-test.yaml @@ -3,16 +3,15 @@ apiVersion: chainsaw.kyverno.io/v1alpha1 kind: Test metadata: name: fw-use-specified-nb + labels: + all: + lke: spec: bindings: - name: fwname value: (join('-', ['ccm-fwtest', env('CLUSTER_NAME')])) namespace: "fw-use-specified-nb" steps: - - name: Check if CCM is deployed - try: - - assert: - file: ../assert-ccm-resources.yaml - name: Create firewall, Create pods and services try: - script: diff --git a/e2e/test/lb-created-with-new-nb-id/chainsaw-test.yaml b/e2e/test/lb-created-with-new-nb-id/chainsaw-test.yaml index df1a0952..16d7ec3e 100644 --- a/e2e/test/lb-created-with-new-nb-id/chainsaw-test.yaml +++ b/e2e/test/lb-created-with-new-nb-id/chainsaw-test.yaml @@ -3,13 +3,12 @@ apiVersion: chainsaw.kyverno.io/v1alpha1 kind: Test metadata: name: lb-created-with-new-nb-id + labels: + all: + lke: spec: namespace: "lb-created-with-new-nb-id" steps: - - name: Check if CCM is deployed - try: - - assert: - file: ../assert-ccm-resources.yaml - name: Create nodebalancer and create resources try: - script: diff --git a/e2e/test/lb-created-with-specified-nb-id/chainsaw-test.yaml b/e2e/test/lb-created-with-specified-nb-id/chainsaw-test.yaml index 0b77dbe9..1bb82e82 100644 --- a/e2e/test/lb-created-with-specified-nb-id/chainsaw-test.yaml +++ b/e2e/test/lb-created-with-specified-nb-id/chainsaw-test.yaml @@ -3,13 +3,12 @@ apiVersion: chainsaw.kyverno.io/v1alpha1 kind: Test metadata: name: lb-created-with-specified-nb-id + labels: + all: + lke: spec: namespace: "lb-created-with-specified-nb-id" steps: - 
- name: Check if CCM is deployed - try: - - assert: - file: ../assert-ccm-resources.yaml - name: Create nodebalancer and create resources try: - script: diff --git a/e2e/test/lb-delete-svc-no-nb/chainsaw-test.yaml b/e2e/test/lb-delete-svc-no-nb/chainsaw-test.yaml index 723a5d35..1f75e928 100644 --- a/e2e/test/lb-delete-svc-no-nb/chainsaw-test.yaml +++ b/e2e/test/lb-delete-svc-no-nb/chainsaw-test.yaml @@ -3,13 +3,12 @@ apiVersion: chainsaw.kyverno.io/v1alpha1 kind: Test metadata: name: lb-delete-svc-no-nb + labels: + all: + lke: spec: namespace: "lb-delete-svc-no-nb" steps: - - name: Check if CCM is deployed - try: - - assert: - file: ../assert-ccm-resources.yaml - name: Create nodebalancer and create resources try: - script: diff --git a/e2e/test/lb-delete-svc-use-new-nbid/chainsaw-test.yaml b/e2e/test/lb-delete-svc-use-new-nbid/chainsaw-test.yaml index 7369d478..7bec4e56 100644 --- a/e2e/test/lb-delete-svc-use-new-nbid/chainsaw-test.yaml +++ b/e2e/test/lb-delete-svc-use-new-nbid/chainsaw-test.yaml @@ -3,13 +3,12 @@ apiVersion: chainsaw.kyverno.io/v1alpha1 kind: Test metadata: name: lb-delete-svc-use-new-nbid + labels: + all: + lke: spec: namespace: "lb-delete-svc-use-new-nbid" steps: - - name: Check if CCM is deployed - try: - - assert: - file: ../assert-ccm-resources.yaml - name: Create nodebalancer and create resources try: - script: diff --git a/e2e/test/lb-delete-svc-use-specified-nb/chainsaw-test.yaml b/e2e/test/lb-delete-svc-use-specified-nb/chainsaw-test.yaml index 99ceb8e4..c8c593aa 100644 --- a/e2e/test/lb-delete-svc-use-specified-nb/chainsaw-test.yaml +++ b/e2e/test/lb-delete-svc-use-specified-nb/chainsaw-test.yaml @@ -3,13 +3,12 @@ apiVersion: chainsaw.kyverno.io/v1alpha1 kind: Test metadata: name: lb-delete-svc-use-specified-nb + labels: + all: + lke: spec: namespace: "lb-delete-svc-use-specified-nb" steps: - - name: Check if CCM is deployed - try: - - assert: - file: ../assert-ccm-resources.yaml - name: Create nodebalancer and create resources try: - script: diff --git a/e2e/test/lb-fw-delete-acl/chainsaw-test.yaml b/e2e/test/lb-fw-delete-acl/chainsaw-test.yaml index 6d5bd6ed..e46edac9 100644 --- a/e2e/test/lb-fw-delete-acl/chainsaw-test.yaml +++ b/e2e/test/lb-fw-delete-acl/chainsaw-test.yaml @@ -3,13 +3,12 @@ apiVersion: chainsaw.kyverno.io/v1alpha1 kind: Test metadata: name: lb-fw-delete-acl + labels: + all: + lke: spec: namespace: "lb-fw-delete-acl" steps: - - name: Check if CCM is deployed - try: - - assert: - file: ../assert-ccm-resources.yaml - name: Create pods and services try: - apply: diff --git a/e2e/test/lb-fw-update-acl/chainsaw-test.yaml b/e2e/test/lb-fw-update-acl/chainsaw-test.yaml index 15b05807..de56b1ea 100644 --- a/e2e/test/lb-fw-update-acl/chainsaw-test.yaml +++ b/e2e/test/lb-fw-update-acl/chainsaw-test.yaml @@ -3,13 +3,12 @@ apiVersion: chainsaw.kyverno.io/v1alpha1 kind: Test metadata: name: lb-fw-update-acl + labels: + all: + lke: spec: namespace: "lb-fw-update-acl" steps: - - name: Check if CCM is deployed - try: - - assert: - file: ../assert-ccm-resources.yaml - name: Create pods and services try: - apply: diff --git a/e2e/test/lb-hostname-only-ingress/chainsaw-test.yaml b/e2e/test/lb-hostname-only-ingress/chainsaw-test.yaml index 69c7cd0e..a8027c43 100644 --- a/e2e/test/lb-hostname-only-ingress/chainsaw-test.yaml +++ b/e2e/test/lb-hostname-only-ingress/chainsaw-test.yaml @@ -3,13 +3,12 @@ apiVersion: chainsaw.kyverno.io/v1alpha1 kind: Test metadata: name: lb-hostname-only-ingress + labels: + all: + lke: spec: namespace: "lb-hostname-only-ingress" 
steps: - - name: Check if CCM is deployed - try: - - assert: - file: ../assert-ccm-resources.yaml - name: Create pods and services try: - apply: diff --git a/e2e/test/lb-http-body-health-check/chainsaw-test.yaml b/e2e/test/lb-http-body-health-check/chainsaw-test.yaml index b6246b55..8a77543d 100644 --- a/e2e/test/lb-http-body-health-check/chainsaw-test.yaml +++ b/e2e/test/lb-http-body-health-check/chainsaw-test.yaml @@ -3,13 +3,12 @@ apiVersion: chainsaw.kyverno.io/v1alpha1 kind: Test metadata: name: lb-http-body-health-check + labels: + all: + lke: spec: namespace: "lb-http-body-health-check" steps: - - name: Check if CCM is deployed - try: - - assert: - file: ../assert-ccm-resources.yaml - name: Create pods and services try: - apply: diff --git a/e2e/test/lb-http-status-health-check/chainsaw-test.yaml b/e2e/test/lb-http-status-health-check/chainsaw-test.yaml index 16f5b728..1eb50d99 100644 --- a/e2e/test/lb-http-status-health-check/chainsaw-test.yaml +++ b/e2e/test/lb-http-status-health-check/chainsaw-test.yaml @@ -3,13 +3,12 @@ apiVersion: chainsaw.kyverno.io/v1alpha1 kind: Test metadata: name: lb-http-status-health-check + labels: + all: + lke: spec: namespace: "lb-http-status-health-check" steps: - - name: Check if CCM is deployed - try: - - assert: - file: ../assert-ccm-resources.yaml - name: Create pods and services try: - apply: diff --git a/e2e/test/lb-passive-health-check/chainsaw-test.yaml b/e2e/test/lb-passive-health-check/chainsaw-test.yaml index d7479d88..1f6a022b 100644 --- a/e2e/test/lb-passive-health-check/chainsaw-test.yaml +++ b/e2e/test/lb-passive-health-check/chainsaw-test.yaml @@ -3,13 +3,12 @@ apiVersion: chainsaw.kyverno.io/v1alpha1 kind: Test metadata: name: lb-passive-health-check + labels: + all: + lke: spec: namespace: "lb-passive-health-check" steps: - - name: Check if CCM is deployed - try: - - assert: - file: ../assert-ccm-resources.yaml - name: Create pods and services try: - apply: diff --git a/e2e/test/lb-premium-nb/chainsaw-test.yaml b/e2e/test/lb-premium-nb/chainsaw-test.yaml new file mode 100644 index 00000000..8248a63c --- /dev/null +++ b/e2e/test/lb-premium-nb/chainsaw-test.yaml @@ -0,0 +1,93 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + name: lb-premium-nb + labels: + all: +spec: + namespace: "lb-premium-nb" + steps: + - name: Create pods and services + try: + - apply: + file: create-pods-services.yaml + catch: + - describe: + apiVersion: v1 + kind: Pod + - describe: + apiVersion: v1 + kind: Service + - name: Check that loadbalancer ip is assigned + try: + - assert: + resource: + apiVersion: v1 + kind: Service + metadata: + name: svc-test + status: + (loadBalancer.ingress[0].ip != null): true + - name: Fetch loadbalancer ip and check both pods reachable + try: + - script: + content: | + set -e + IP=$(kubectl get svc svc-test -n $NAMESPACE -o json | jq -r .status.loadBalancer.ingress[0].ip) + + podnames=() + + for i in {1..10}; do + if [[ ${#podnames[@]} -lt 2 ]]; then + output=$(curl -s $IP:80 | jq -e .podName || true) + + if [[ "$output" == *"test-"* ]]; then + unique=true + for i in "${array[@]}"; do + if [[ "$i" == "$output" ]]; then + unique=false + break + fi + done + if [[ "$unique" == true ]]; then + podnames+=($output) + fi + fi + else + break + fi + sleep 10 + done + + if [[ ${#podnames[@]} -lt 2 ]]; then + echo "all pods failed to respond" + else + echo "all pods responded" + fi + check: 
+ ($error == null): true + (contains($stdout, 'all pods responded')): true + - name: Check nodebalancer type + try: + - script: + content: | + set -e + + nbid=$(KUBECONFIG=$KUBECONFIG NAMESPACE=$NAMESPACE LINODE_TOKEN=$LINODE_TOKEN ../scripts/get-nb-id.sh) + for i in {1..10}; do + type=$(curl -s --request GET \ + -H "Authorization: Bearer $LINODE_TOKEN" \ + -H "Content-Type: application/json" \ + -H "accept: application/json" \ + "https://api.linode.com/v4/nodebalancers/${nbid}" | jq -r '.type') + + if [[ $type == "premium" ]]; then + echo "nodebalancer type is premium" + break + fi + sleep 5 + done + check: + ($error == null): true + (contains($stdout, 'nodebalancer type is premium')): true diff --git a/e2e/test/lb-premium-nb/create-pods-services.yaml b/e2e/test/lb-premium-nb/create-pods-services.yaml new file mode 100644 index 00000000..fc79d2e4 --- /dev/null +++ b/e2e/test/lb-premium-nb/create-pods-services.yaml @@ -0,0 +1,61 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: lb-premium-nb + name: test +spec: + replicas: 2 + selector: + matchLabels: + app: lb-premium-nb + template: + metadata: + labels: + app: lb-premium-nb + spec: + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - podAffinityTerm: + labelSelector: + matchExpressions: + - key: app + operator: In + values: + - simple-lb + topologyKey: kubernetes.io/hostname + weight: 100 + containers: + - image: appscode/test-server:2.3 + name: test + ports: + - name: http-1 + containerPort: 8080 + protocol: TCP + env: + - name: POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name +--- +apiVersion: v1 +kind: Service +metadata: + name: svc-test + annotations: + service.beta.kubernetes.io/linode-loadbalancer-nodebalancer-type: premium + labels: + app: lb-premium-nb +spec: + type: LoadBalancer + selector: + app: lb-premium-nb + ports: + - name: http-1 + protocol: TCP + port: 80 + targetPort: 8080 + sessionAffinity: None diff --git a/e2e/test/lb-preserve-annotation-new-nb-specified/chainsaw-test.yaml b/e2e/test/lb-preserve-annotation-new-nb-specified/chainsaw-test.yaml index d7f2661d..1da5a05c 100644 --- a/e2e/test/lb-preserve-annotation-new-nb-specified/chainsaw-test.yaml +++ b/e2e/test/lb-preserve-annotation-new-nb-specified/chainsaw-test.yaml @@ -3,13 +3,12 @@ apiVersion: chainsaw.kyverno.io/v1alpha1 kind: Test metadata: name: lb-preserve-annotation-new-nb-specified + labels: + all: + lke: spec: namespace: "lb-preserve-annotation-new-nb-specified" steps: - - name: Check if CCM is deployed - try: - - assert: - file: ../assert-ccm-resources.yaml - name: Create resources try: - apply: diff --git a/e2e/test/lb-preserve-annotation-svc-delete/chainsaw-test.yaml b/e2e/test/lb-preserve-annotation-svc-delete/chainsaw-test.yaml index 2e33d401..0c50c15a 100644 --- a/e2e/test/lb-preserve-annotation-svc-delete/chainsaw-test.yaml +++ b/e2e/test/lb-preserve-annotation-svc-delete/chainsaw-test.yaml @@ -3,13 +3,12 @@ apiVersion: chainsaw.kyverno.io/v1alpha1 kind: Test metadata: name: lb-preserve-annotation-svc-delete + labels: + all: + lke: spec: namespace: "lb-preserve-annotation-svc-delete" steps: - - name: Check if CCM is deployed - try: - - assert: - file: ../assert-ccm-resources.yaml - name: Create resources try: - apply: diff --git a/e2e/test/lb-simple/chainsaw-test.yaml b/e2e/test/lb-simple/chainsaw-test.yaml index 2661961a..e24a78be 100644 --- a/e2e/test/lb-simple/chainsaw-test.yaml +++ b/e2e/test/lb-simple/chainsaw-test.yaml @@ -3,13 +3,12 @@ apiVersion: 
chainsaw.kyverno.io/v1alpha1 kind: Test metadata: name: lb-simple + labels: + all: + lke: spec: namespace: "lb-simple" steps: - - name: Check if CCM is deployed - try: - - assert: - file: ../assert-ccm-resources.yaml - name: Create pods and services try: - apply: diff --git a/e2e/test/lb-single-tls/chainsaw-test.yaml b/e2e/test/lb-single-tls/chainsaw-test.yaml index a75e4964..b27409e5 100644 --- a/e2e/test/lb-single-tls/chainsaw-test.yaml +++ b/e2e/test/lb-single-tls/chainsaw-test.yaml @@ -3,13 +3,12 @@ apiVersion: chainsaw.kyverno.io/v1alpha1 kind: Test metadata: name: lb-single-tls + labels: + all: + lke: spec: namespace: "lb-single-tls" steps: - - name: Check if CCM is deployed - try: - - assert: - file: ../assert-ccm-resources.yaml - name: Create secret try: - script: diff --git a/e2e/test/lb-tcp-connection-health-check/chainsaw-test.yaml b/e2e/test/lb-tcp-connection-health-check/chainsaw-test.yaml index f59f14e2..11b9e3be 100644 --- a/e2e/test/lb-tcp-connection-health-check/chainsaw-test.yaml +++ b/e2e/test/lb-tcp-connection-health-check/chainsaw-test.yaml @@ -3,13 +3,12 @@ apiVersion: chainsaw.kyverno.io/v1alpha1 kind: Test metadata: name: lb-tcp-connection-health-check + labels: + all: + lke: spec: namespace: "lb-tcp-connection-health-check" steps: - - name: Check if CCM is deployed - try: - - assert: - file: ../assert-ccm-resources.yaml - name: Create pods and services try: - apply: diff --git a/e2e/test/lb-updated-with-nb-id/chainsaw-test.yaml b/e2e/test/lb-updated-with-nb-id/chainsaw-test.yaml index c897979b..346e67c9 100644 --- a/e2e/test/lb-updated-with-nb-id/chainsaw-test.yaml +++ b/e2e/test/lb-updated-with-nb-id/chainsaw-test.yaml @@ -3,13 +3,12 @@ apiVersion: chainsaw.kyverno.io/v1alpha1 kind: Test metadata: name: lb-updated-with-nb-id + labels: + all: + lke: spec: namespace: "lb-updated-with-nb-id" steps: - - name: Check if CCM is deployed - try: - - assert: - file: ../assert-ccm-resources.yaml - name: Create pods and services try: - apply: diff --git a/e2e/test/lb-with-http-to-https/chainsaw-test.yaml b/e2e/test/lb-with-http-to-https/chainsaw-test.yaml index 745b77ad..c9f7670e 100644 --- a/e2e/test/lb-with-http-to-https/chainsaw-test.yaml +++ b/e2e/test/lb-with-http-to-https/chainsaw-test.yaml @@ -3,13 +3,12 @@ apiVersion: chainsaw.kyverno.io/v1alpha1 kind: Test metadata: name: lb-with-http-to-https + labels: + all: + lke: spec: namespace: "lb-with-http-to-https" steps: - - name: Check if CCM is deployed - try: - - assert: - file: ../assert-ccm-resources.yaml - name: Create pods and services try: - apply: diff --git a/e2e/test/lb-with-multiple-http-https-ports/chainsaw-test.yaml b/e2e/test/lb-with-multiple-http-https-ports/chainsaw-test.yaml index da73d113..501a8801 100644 --- a/e2e/test/lb-with-multiple-http-https-ports/chainsaw-test.yaml +++ b/e2e/test/lb-with-multiple-http-https-ports/chainsaw-test.yaml @@ -3,13 +3,12 @@ apiVersion: chainsaw.kyverno.io/v1alpha1 kind: Test metadata: name: lb-with-multiple-http-https-ports + labels: + all: + lke: spec: namespace: "lb-with-multiple-http-https-ports" steps: - - name: Check if CCM is deployed - try: - - assert: - file: ../assert-ccm-resources.yaml - name: Create pods and services try: - apply: diff --git a/e2e/test/lb-with-node-addition/chainsaw-test.yaml b/e2e/test/lb-with-node-addition/chainsaw-test.yaml index 62f17873..f057413c 100644 --- a/e2e/test/lb-with-node-addition/chainsaw-test.yaml +++ b/e2e/test/lb-with-node-addition/chainsaw-test.yaml @@ -3,13 +3,11 @@ apiVersion: chainsaw.kyverno.io/v1alpha1 kind: Test 
metadata: name: lb-with-node-addition + labels: + all: spec: namespace: "lb-with-node-addition" steps: - - name: Check if CCM is deployed - try: - - assert: - file: ../assert-ccm-resources.yaml - name: Create resources try: - apply: diff --git a/e2e/test/lb-with-proxyprotocol-default-annotation/chainsaw-test.yaml b/e2e/test/lb-with-proxyprotocol-default-annotation/chainsaw-test.yaml index e8e07665..0393ce4b 100644 --- a/e2e/test/lb-with-proxyprotocol-default-annotation/chainsaw-test.yaml +++ b/e2e/test/lb-with-proxyprotocol-default-annotation/chainsaw-test.yaml @@ -3,13 +3,12 @@ apiVersion: chainsaw.kyverno.io/v1alpha1 kind: Test metadata: name: lb-with-proxyprotocol-default-annotation + labels: + all: + lke: spec: namespace: "lb-with-proxyprotocol-default-annotation" steps: - - name: Check if CCM is deployed - try: - - assert: - file: ../assert-ccm-resources.yaml - name: Create pods and services try: - apply: diff --git a/e2e/test/lb-with-proxyprotocol-override/chainsaw-test.yaml b/e2e/test/lb-with-proxyprotocol-override/chainsaw-test.yaml index 384fdc4a..7f7814a5 100644 --- a/e2e/test/lb-with-proxyprotocol-override/chainsaw-test.yaml +++ b/e2e/test/lb-with-proxyprotocol-override/chainsaw-test.yaml @@ -3,13 +3,12 @@ apiVersion: chainsaw.kyverno.io/v1alpha1 kind: Test metadata: name: lb-with-proxyprotocol-override + labels: + all: + lke: spec: namespace: "lb-with-proxyprotocol-override" steps: - - name: Check if CCM is deployed - try: - - assert: - file: ../assert-ccm-resources.yaml - name: Create pods and services try: - apply: diff --git a/e2e/test/lb-with-proxyprotocol-port-specific/chainsaw-test.yaml b/e2e/test/lb-with-proxyprotocol-port-specific/chainsaw-test.yaml index 61cc3d25..805525f2 100644 --- a/e2e/test/lb-with-proxyprotocol-port-specific/chainsaw-test.yaml +++ b/e2e/test/lb-with-proxyprotocol-port-specific/chainsaw-test.yaml @@ -3,13 +3,12 @@ apiVersion: chainsaw.kyverno.io/v1alpha1 kind: Test metadata: name: lb-with-proxyprotocol-port-specific + labels: + all: + lke: spec: namespace: "lb-with-proxyprotocol-port-specific" steps: - - name: Check if CCM is deployed - try: - - assert: - file: ../assert-ccm-resources.yaml - name: Create pods and services try: - apply: diff --git a/e2e/test/lb-with-proxyprotocol-set/chainsaw-test.yaml b/e2e/test/lb-with-proxyprotocol-set/chainsaw-test.yaml index c4a43b2d..64fbceb0 100644 --- a/e2e/test/lb-with-proxyprotocol-set/chainsaw-test.yaml +++ b/e2e/test/lb-with-proxyprotocol-set/chainsaw-test.yaml @@ -3,13 +3,12 @@ apiVersion: chainsaw.kyverno.io/v1alpha1 kind: Test metadata: name: lb-with-proxyprotocol-set + labels: + all: + lke: spec: namespace: "lb-with-proxyprotocol-set" steps: - - name: Check if CCM is deployed - try: - - assert: - file: ../assert-ccm-resources.yaml - name: Create pods and services try: - apply: diff --git a/e2e/test/lb-with-vpc-backends/chainsaw-test.yaml b/e2e/test/lb-with-vpc-backends/chainsaw-test.yaml new file mode 100644 index 00000000..50788d39 --- /dev/null +++ b/e2e/test/lb-with-vpc-backends/chainsaw-test.yaml @@ -0,0 +1,75 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/kyverno/chainsaw/main/.schemas/json/test-chainsaw-v1alpha1.json +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + name: lb-with-vpc-backends + labels: + all: +spec: + namespace: "lb-with-vpc-backends" + steps: + - name: Create pods and services + try: + - apply: + file: create-pods-services.yaml + catch: + - describe: + apiVersion: v1 + kind: Pod + - describe: + apiVersion: v1 + kind: Service + - name: 
Check endpoints exist + try: + - assert: + resource: + apiVersion: v1 + kind: Endpoints + metadata: + name: svc-test + (subsets[0].addresses != null): true + (subsets[0].ports != null): true + - name: Check that loadbalancer ip is assigned + try: + - assert: + resource: + apiVersion: v1 + kind: Service + metadata: + name: svc-test + status: + (loadBalancer.ingress[0].ip != null): true + - name: Check NodeBalancerConfig for backend ips + try: + - script: + content: | + set -e + + nbid=$(KUBECONFIG=$KUBECONFIG NAMESPACE=$NAMESPACE LINODE_TOKEN=$LINODE_TOKEN ../scripts/get-nb-id.sh) + + nbconfig=$(curl -s \ + -H "Authorization: Bearer $LINODE_TOKEN" \ + -H "Content-Type: application/json" \ + "https://api.linode.com/v4/nodebalancers/$nbid/configs") + + config_id=$(echo $nbconfig | jq -r '.data[] | select(.port == 80) | .id') + + # Get nodes from the config + nodes=$(curl -s \ + -H "Authorization: Bearer $LINODE_TOKEN" \ + -H "Content-Type: application/json" \ + "https://api.linode.com/v4/nodebalancers/$nbid/configs/$config_id/nodes") + + # Extract all addresses and remove ports + addresses=$(echo "$nodes" | jq -r '.data[].address' | sed 's/:[0-9]*$//') + + for ip in $addresses; do + if [[ $ip =~ ^10\.[0-9]+\.[0-9]+\.[0-9]+$ ]]; then + echo "$ip is in the 10.0.0.0/8 subnet" + else + echo "$ip is NOT in the 10.0.0.0/8 subnet" + fi + done + check: + ($error): ~ + (contains($stdout, 'is NOT in the 10.0.0.0/8 subnet')): false diff --git a/e2e/test/lb-with-vpc-backends/create-pods-services.yaml b/e2e/test/lb-with-vpc-backends/create-pods-services.yaml new file mode 100644 index 00000000..91d5017a --- /dev/null +++ b/e2e/test/lb-with-vpc-backends/create-pods-services.yaml @@ -0,0 +1,49 @@ +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app: vpc-backends + name: test +spec: + replicas: 1 + selector: + matchLabels: + app: vpc-backends + template: + metadata: + labels: + app: vpc-backends + spec: + containers: + - image: appscode/test-server:2.3 + name: test + ports: + - name: http-1 + containerPort: 80 + protocol: TCP + env: + - name: POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name +--- +apiVersion: v1 +kind: Service +metadata: + name: svc-test + annotations: + service.beta.kubernetes.io/linode-loadbalancer-backend-ipv4-range: "10.100.0.0/30" + labels: + app: vpc-backends +spec: + type: LoadBalancer + selector: + app: vpc-backends + ports: + - name: http-1 + protocol: TCP + port: 80 + targetPort: 80 + sessionAffinity: None diff --git a/e2e/test/route-controller-test/chainsaw-test.yaml b/e2e/test/route-controller-test/chainsaw-test.yaml index 236f13df..117d9e56 100644 --- a/e2e/test/route-controller-test/chainsaw-test.yaml +++ b/e2e/test/route-controller-test/chainsaw-test.yaml @@ -3,16 +3,14 @@ apiVersion: chainsaw.kyverno.io/v1alpha1 kind: Test metadata: name: route-controller-test + labels: + all: spec: bindings: - name: fwname value: (join('-', ['ccm-fwtest', env('CLUSTER_NAME')])) namespace: "route-controller-test" steps: - - name: Check if CCM is deployed - try: - - assert: - file: ../assert-ccm-resources.yaml - name: Check if the route controller updated the config for the linode try: - script: @@ -63,3 +61,72 @@ spec: check: ($error == null): true (contains($stdout, 'Pod CIDR not found in VPC interface configuration')): false + - name: Modify instance label using linodeAPI and check if it still has the route configured + try: + - script: + content: | + set -e + + # Use last node to change its label + node=$(kubectl get nodes -o 
jsonpath='{.items[-1].metadata.name}') + if [ -z "$node" ]; then + echo "Error: No nodes found in cluster" + exit 1 + fi + + instance_id=$(kubectl get node "$node" -o jsonpath='{.spec.providerID}' | sed 's/linode:\/\///') + pod_cidr=$(kubectl get node "$node" -o jsonpath='{.spec.podCIDR}') + newNodeName=${node}-test + + # Change label of linode + resp=$(curl -X PUT --write-out "%{http_code}\n" \ + --silent --output /dev/null \ + -H "Authorization: Bearer $LINODE_TOKEN" \ + -H "Content-Type: application/json" \ + "https://api.linode.com/v4/linode/instances/$instance_id" \ + --data "{\"label\": \"$newNodeName\"}") + + if [[ $resp != "200" ]]; then + echo "Failed updating node label" + exit 1 + fi + + currentLabel=$(curl -s \ + -H "Authorization: Bearer $LINODE_TOKEN" \ + "https://api.linode.com/v4/linode/instances/$instance_id" \ + | jq -r '.label') + + if [[ $currentLabel == $newNodeName ]]; then + echo "Labels match" + fi + + # sleep for a minute for route_controller to reconcile few times + sleep 60 + + # Get interface details for this config + interfaces=$(curl -s \ + -H "Authorization: Bearer $LINODE_TOKEN" \ + "https://api.linode.com/v4/linode/instances/$instance_id/configs" \ + | jq -r '.data[0].interfaces') + + # Check if pod CIDR still exists in the VPC interface IP ranges + if echo "$interfaces" | jq -e --arg cidr "$pod_cidr" '.[] | select(.purpose == "vpc") | .ip_ranges[] | select(. == $cidr)' > /dev/null; then + echo "Pod CIDR found in VPC interface configuration" + else + echo "Pod CIDR not found in VPC interface configuration" + echo "Current VPC interface configuration:" + echo "$interfaces" | jq '.[] | select(.purpose == "vpc")' + fi + + # revert label to original value + curl -X PUT --write-out "%{http_code}\n" \ + --silent --output /dev/null \ + -H "Authorization: Bearer $LINODE_TOKEN" \ + -H "Content-Type: application/json" \ + "https://api.linode.com/v4/linode/instances/$instance_id" \ + --data "{\"label\": \"$node\"}" + check: + ($error == null): true + (contains($stdout, 'Failed updating node label')): false + (contains($stdout, 'Labels match')): true + (contains($stdout, 'Pod CIDR found in VPC interface configuration')): true diff --git a/go.mod b/go.mod index d22a712e..1ed11434 100644 --- a/go.mod +++ b/go.mod @@ -1,26 +1,26 @@ module github.com/linode/linode-cloud-controller-manager -go 1.23.0 +go 1.24 -toolchain go1.23.3 +toolchain go1.24.1 require ( github.com/appscode/go v0.0.0-20201105063637-5613f3b8169f - github.com/cilium/cilium v1.16.6 + github.com/cilium/cilium v1.17.2 github.com/getsentry/sentry-go v0.31.1 github.com/golang/mock v1.6.0 github.com/google/uuid v1.6.0 - github.com/hexdigest/gowrap v1.4.1 - github.com/linode/linodego v1.47.0 - github.com/prometheus/client_golang v1.20.5 + github.com/hexdigest/gowrap v1.4.2 + github.com/linode/linodego v1.48.1 + github.com/prometheus/client_golang v1.21.1 github.com/spf13/pflag v1.0.6 github.com/stretchr/testify v1.10.0 golang.org/x/exp v0.0.0-20241215155358-4a5509556b9e - k8s.io/api v0.32.1 - k8s.io/apimachinery v0.32.1 - k8s.io/client-go v0.32.1 - k8s.io/cloud-provider v0.32.1 - k8s.io/component-base v0.32.1 + k8s.io/api v0.32.3 + k8s.io/apimachinery v0.32.3 + k8s.io/client-go v0.32.3 + k8s.io/cloud-provider v0.32.3 + k8s.io/component-base v0.32.3 k8s.io/klog/v2 v2.130.1 k8s.io/utils v0.0.0-20241210054802-24370beab758 ) @@ -38,9 +38,11 @@ require ( github.com/blang/semver/v4 v4.0.0 // indirect github.com/cenkalti/backoff/v4 v4.3.0 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect - 
github.com/cilium/ebpf v0.16.0 // indirect - github.com/cilium/hive v0.0.0-20241213121623-605c1412b9b3 // indirect + github.com/cilium/ebpf v0.17.1 // indirect + github.com/cilium/hive v0.0.0-20250121145729-e67f66eb0375 // indirect github.com/cilium/proxy v0.0.0-20241216122539-268a44ec93e9 // indirect + github.com/cilium/statedb v0.3.6 // indirect + github.com/cilium/stream v0.0.0-20241203114243-53c3e5d79744 // indirect github.com/coreos/go-semver v0.3.1 // indirect github.com/coreos/go-systemd/v22 v22.5.0 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect @@ -60,17 +62,17 @@ require ( github.com/go-openapi/strfmt v0.23.0 // indirect github.com/go-openapi/swag v0.23.0 // indirect github.com/go-openapi/validate v0.24.0 // indirect - github.com/go-resty/resty/v2 v2.16.3 // indirect + github.com/go-resty/resty/v2 v2.16.5 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 // indirect github.com/golang/protobuf v1.5.4 // indirect github.com/google/btree v1.1.3 // indirect github.com/google/cel-go v0.22.1 // indirect github.com/google/gnostic-models v0.6.9 // indirect - github.com/google/go-cmp v0.6.0 // indirect + github.com/google/go-cmp v0.7.0 // indirect github.com/google/go-querystring v1.1.0 // indirect github.com/google/gofuzz v1.2.0 // indirect - github.com/google/gopacket v1.1.19 // indirect + github.com/gopacket/gopacket v1.3.1 // indirect github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0 // indirect github.com/hashicorp/hcl v1.0.1-vault-7 // indirect @@ -80,13 +82,12 @@ require ( github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect github.com/klauspost/compress v1.17.11 // indirect - github.com/kr/pretty v0.3.1 // indirect - github.com/kr/text v0.2.0 // indirect github.com/kylelemons/godebug v1.1.0 // indirect github.com/mackerelio/go-osstat v0.2.5 // indirect github.com/magiconair/properties v1.8.9 // indirect github.com/mailru/easyjson v0.9.0 // indirect github.com/mitchellh/copystructure v1.1.2 // indirect + github.com/mitchellh/go-wordwrap v1.0.1 // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect github.com/mitchellh/reflectwalk v1.0.1 // indirect github.com/moby/term v0.5.0 // indirect @@ -100,9 +101,8 @@ require ( github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/prometheus/client_model v0.6.1 // indirect - github.com/prometheus/common v0.61.0 // indirect + github.com/prometheus/common v0.62.0 // indirect github.com/prometheus/procfs v0.15.1 // indirect - github.com/rogpeppe/go-internal v1.13.1 // indirect github.com/sagikazarmark/locafero v0.6.0 // indirect github.com/sagikazarmark/slog-shim v0.1.0 // indirect github.com/sasha-s/go-deadlock v0.3.5 // indirect @@ -115,7 +115,7 @@ require ( github.com/spf13/viper v1.19.0 // indirect github.com/stoewer/go-strcase v1.3.0 // indirect github.com/subosito/gotenv v1.6.0 // indirect - github.com/vishvananda/netlink v1.3.1-0.20241022031324-976bd8de7d81 // indirect + github.com/vishvananda/netlink v1.3.1-0.20250221194427-0af32151e72b // indirect github.com/vishvananda/netns v0.0.5 // indirect github.com/x448/float16 v0.8.4 // indirect go.etcd.io/etcd/api/v3 v3.5.17 // indirect @@ -136,33 +136,35 @@ require ( go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect go4.org/netipx v0.0.0-20231129151722-fdeea329fbba // 
indirect - golang.org/x/crypto v0.32.0 // indirect + golang.org/x/crypto v0.35.0 // indirect golang.org/x/mod v0.22.0 // indirect - golang.org/x/net v0.34.0 // indirect - golang.org/x/oauth2 v0.25.0 // indirect - golang.org/x/sync v0.10.0 // indirect - golang.org/x/sys v0.29.0 // indirect - golang.org/x/term v0.28.0 // indirect - golang.org/x/text v0.21.0 // indirect + golang.org/x/net v0.36.0 // indirect + golang.org/x/oauth2 v0.27.0 // indirect + golang.org/x/sync v0.11.0 // indirect + golang.org/x/sys v0.30.0 // indirect + golang.org/x/term v0.29.0 // indirect + golang.org/x/text v0.22.0 // indirect golang.org/x/time v0.8.0 // indirect golang.org/x/tools v0.29.0 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20241209162323-e6fa225c2576 // indirect google.golang.org/grpc v1.69.0 // indirect - google.golang.org/protobuf v1.35.2 // indirect + google.golang.org/protobuf v1.36.1 // indirect gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/apiextensions-apiserver v0.30.2 // indirect - k8s.io/apiserver v0.32.1 // indirect - k8s.io/component-helpers v0.32.1 // indirect - k8s.io/controller-manager v0.32.1 // indirect - k8s.io/kms v0.32.1 // indirect + k8s.io/apiextensions-apiserver v0.32.0 // indirect + k8s.io/apiserver v0.32.3 // indirect + k8s.io/component-helpers v0.32.3 // indirect + k8s.io/controller-manager v0.32.3 // indirect + k8s.io/kms v0.32.3 // indirect k8s.io/kube-openapi v0.0.0-20241212222426-2c72e554b1e7 // indirect sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.1 // indirect + sigs.k8s.io/gateway-api v1.2.1 // indirect sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 // indirect + sigs.k8s.io/mcs-api v0.1.1-0.20250116162235-62ede9a032dc // indirect sigs.k8s.io/structured-merge-diff/v4 v4.5.0 // indirect sigs.k8s.io/yaml v1.4.0 // indirect ) diff --git a/go.sum b/go.sum index 215873ec..a4533348 100644 --- a/go.sum +++ b/go.sum @@ -1,7 +1,7 @@ cel.dev/expr v0.19.1 h1:NciYrtDRIR0lNCnH1LFJegdjspNx9fI59O7TWcua/W4= cel.dev/expr v0.19.1/go.mod h1:MrpN08Q+lEBs+bGYdLxxHkZoUSsCp0nSKTs0nTymJgw= -github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 h1:bvDV9vkmnHYOMsOr4WLk+Vo07yKIzd94sVoIqshQ4bU= -github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24/go.mod h1:8o94RPi1/7XTJvwPpRSzSUedZrtlirdB3r9Z20bi2f8= +github.com/AdaLogics/go-fuzz-headers v0.0.0-20240806141605-e8a1dd7889d6 h1:He8afgbRMd7mFxO99hRNu+6tazq8nFF9lIwo9JFroBk= +github.com/AdaLogics/go-fuzz-headers v0.0.0-20240806141605-e8a1dd7889d6/go.mod h1:8o94RPi1/7XTJvwPpRSzSUedZrtlirdB3r9Z20bi2f8= github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0= github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI= @@ -31,18 +31,18 @@ github.com/census-instrumentation/opencensus-proto v0.4.1 h1:iKLQ0xPNFxR/2hzXZMr github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cilium/cilium 
v1.16.6 h1:KRQn5knO48ERxB6SusQo02nYmE0NO0qiLlvqhwBTXbI= -github.com/cilium/cilium v1.16.6/go.mod h1:NnDWQiYmPef24+pX2U/V85uL8eUTJSFUUjMEy41lGPA= -github.com/cilium/ebpf v0.16.0 h1:+BiEnHL6Z7lXnlGUsXQPPAE7+kenAd4ES8MQ5min0Ok= -github.com/cilium/ebpf v0.16.0/go.mod h1:L7u2Blt2jMM/vLAVgjxluxtBKlz3/GWjB0dMOEngfwE= -github.com/cilium/hive v0.0.0-20241213121623-605c1412b9b3 h1:RfmUH1ouzj0LzORYJRhp43e1rlGpx6GNv4NIRUakU2w= -github.com/cilium/hive v0.0.0-20241213121623-605c1412b9b3/go.mod h1:pI2GJ1n3SLKIQVFrKF7W6A6gb6BQkZ+3Hp4PAEo5SuI= +github.com/cilium/cilium v1.17.2 h1:hMahLKho06pzcAk8X+Co7jXhhwrj79rkTsy5FFehr8Q= +github.com/cilium/cilium v1.17.2/go.mod h1:BMwiENNHcSrSsx59fmzyu9V5pBKzav8bWawiA2PcA7U= +github.com/cilium/ebpf v0.17.1 h1:G8mzU81R2JA1nE5/8SRubzqvBMmAmri2VL8BIZPWvV0= +github.com/cilium/ebpf v0.17.1/go.mod h1:vay2FaYSmIlv3r8dNACd4mW/OCaZLJKJOo+IHBvCIO8= +github.com/cilium/hive v0.0.0-20250121145729-e67f66eb0375 h1:EhoCO0AI3qJavnhfAls4w7VpVVpAr12wIh293sNA0hQ= +github.com/cilium/hive v0.0.0-20250121145729-e67f66eb0375/go.mod h1:pI2GJ1n3SLKIQVFrKF7W6A6gb6BQkZ+3Hp4PAEo5SuI= github.com/cilium/proxy v0.0.0-20241216122539-268a44ec93e9 h1:3m0eujK8+y8cKqkQsLSulES72gFayNgcaGXlpwc6bKY= github.com/cilium/proxy v0.0.0-20241216122539-268a44ec93e9/go.mod h1:1jlssjN+8AsZeex4+7ERavw5vRa/lce/ybVRamfeQSU= -github.com/cilium/statedb v0.2.4 h1:jCyXGcsiXgpJSfpfRRGKd+TD3U1teeDtOnqCyErsHsI= -github.com/cilium/statedb v0.2.4/go.mod h1:KPwsudjhZ90zoBguYMtssKpstR74jVKd/D+73PZy+sg= -github.com/cilium/stream v0.0.0-20240226091623-f979d32855f8 h1:j6VF1s6gz3etRH5ObCr0UUyJblP9cK5fbgkQTz8fTRA= -github.com/cilium/stream v0.0.0-20240226091623-f979d32855f8/go.mod h1:/e83AwqvNKpyg4n3C41qmnmj1x2G9DwzI+jb7GkF4lI= +github.com/cilium/statedb v0.3.6 h1:dGwzZTJgVWlnG7io0Wl0XsI7ULsz2TbNqH8Ag+dP6is= +github.com/cilium/statedb v0.3.6/go.mod h1:n2lNVxi8vz5Up1Y1rRD++aQP2izQA932fUwTkedKSV0= +github.com/cilium/stream v0.0.0-20241203114243-53c3e5d79744 h1:f+CgYUy2YyZ2EX31QSqf3vwFiJJQSAMIQLn4d3QQYno= +github.com/cilium/stream v0.0.0-20241203114243-53c3e5d79744/go.mod h1:/e83AwqvNKpyg4n3C41qmnmj1x2G9DwzI+jb7GkF4lI= github.com/cncf/xds/go v0.0.0-20241213214725-57cfbe6fad57 h1:put7Je9ZyxbHtwr7IqGrW4LLVUupJQ2gbsDshKISSgU= github.com/cncf/xds/go v0.0.0-20241213214725-57cfbe6fad57/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0/go.mod h1:4Zcjuz89kmFXt9morQgcfYZAYZ5n8WHjt81YYWIwtTM= @@ -52,7 +52,6 @@ github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03V github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= -github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -108,8 +107,9 @@ github.com/go-openapi/validate v0.24.0 h1:LdfDKwNbpB6Vn40xhTdNZAnfLECL81w+VX3Bum github.com/go-openapi/validate v0.24.0/go.mod h1:iyeX1sEufmv3nPbBdX3ieNviWnOZaJ1+zquzJEf2BAQ= github.com/go-quicktest/qt v1.101.0 h1:O1K29Txy5P2OK0dGo59b7b0LR6wKfIhttaAhHUyn7eI= github.com/go-quicktest/qt v1.101.0/go.mod h1:14Bz/f7NwaXPtdYEgzsx46kqSxVwTbzVZsDC26tQJow= 
-github.com/go-resty/resty/v2 v2.16.3 h1:zacNT7lt4b8M/io2Ahj6yPypL7bqx9n1iprfQuodV+E= -github.com/go-resty/resty/v2 v2.16.3/go.mod h1:hkJtXbA2iKHzJheXYvQ8snQES5ZLGKMwQ07xAwp/fiA= +github.com/go-resty/resty/v2 v2.16.5 h1:hBKqmWrr7uRc3euHVqmh1HTHcKn99Smr7o5spptdhTM= +github.com/go-resty/resty/v2 v2.16.5/go.mod h1:hkJtXbA2iKHzJheXYvQ8snQES5ZLGKMwQ07xAwp/fiA= +github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= @@ -117,8 +117,8 @@ github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/gojuno/minimock/v3 v3.0.10 h1:0UbfgdLHaNRPHWF/RFYPkwxV2KI+SE4tR0dDSFMD7+A= github.com/gojuno/minimock/v3 v3.0.10/go.mod h1:CFXcUJYnBe+1QuNzm+WmdPYtvi/+7zQcPcyQGsbcIXg= -github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= -github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= +github.com/golang-jwt/jwt/v4 v4.5.1 h1:JdqV9zKUdtaa9gdPlywC3aeoEsR681PlKC+4F5gQgeo= +github.com/golang-jwt/jwt/v4 v4.5.1/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 h1:f+oWsMOmNPc8JmEHVZIycC7hBoQxHH9pNKQORJNozsQ= github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8/go.mod h1:wcDNUvekVysuuOpQKo3191zZyTpiI6se1N1ULghS0sw= @@ -135,21 +135,21 @@ github.com/google/gnostic-models v0.6.9 h1:MU/8wDLif2qCXZmzncUQ/BOfxWfthHi63Kqpo github.com/google/gnostic-models v0.6.9/go.mod h1:CiWsm0s6BSQd1hRn8/QmxqB6BesYcbSZxsz9b0KuDBw= github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= -github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= +github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8= github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/gopacket v1.1.19 h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF8= -github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo= github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db h1:097atOisP2aRj7vFgYQBbFN4U4JNXUNYpxael3UzMyo= github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid 
v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/gopacket/gopacket v1.3.1 h1:ZppWyLrOJNZPe5XkdjLbtuTkfQoxQ0xyMJzQCqtqaPU= +github.com/gopacket/gopacket v1.3.1/go.mod h1:3I13qcqSpB2R9fFQg866OOgzylYkZxLTmkvcXhvf6qg= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorilla/websocket v1.5.1 h1:gmztn0JnHVt9JZquRuzLw3g4wouNVzKL15iLr/zn/QY= github.com/gorilla/websocket v1.5.1/go.mod h1:x3kM2JMyaluk02fnUJpQuwD2dCS5NDG2ZHL0uE0tcaY= @@ -163,8 +163,8 @@ github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0 h1:TmHmbvxPmaegwhDubVz0lICL0J5 github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0/go.mod h1:qztMSjm835F2bXf+5HKAPIS5qsmQDqZna/PgVt4rWtI= github.com/hashicorp/hcl v1.0.1-vault-7 h1:ag5OxFVy3QYTFTJODRzTKVZ6xvdfLLCA1cy/Y6xGI0I= github.com/hashicorp/hcl v1.0.1-vault-7/go.mod h1:XYhtn6ijBSAj6n4YqAaf7RBPS4I06AItNorpy+MoQNM= -github.com/hexdigest/gowrap v1.4.1 h1:gZS/XE6ClEHskmhu1bNd0d4wWYcuDzUNsTr7eXC9TYQ= -github.com/hexdigest/gowrap v1.4.1/go.mod h1:s+1hE6qakgdaaLqgdwPAj5qKYVBCSbPJhEbx+I1ef/Q= +github.com/hexdigest/gowrap v1.4.2 h1:crtk5lGwHCROa77mKcP/iQ50eh7z6mBjXsg4U492gfc= +github.com/hexdigest/gowrap v1.4.2/go.mod h1:s+1hE6qakgdaaLqgdwPAj5qKYVBCSbPJhEbx+I1ef/Q= github.com/howeyc/gopass v0.0.0-20170109162249-bf9dde6d0d2c/go.mod h1:lADxMC39cJJqL93Duh1xhAs4I2Zs8mKS89XWXFGp9cs= github.com/huandu/xstrings v1.3.1/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= github.com/huandu/xstrings v1.3.2 h1:L18LIDzqlW6xN2rEkpdV8+oL/IXWJ1APd+vsdYy4Wdw= @@ -199,8 +199,10 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= -github.com/linode/linodego v1.47.0 h1:6MFNCyzWbr8Rhl4r7d5DwZLwxvFIsM4ARH6W0KS/R0U= -github.com/linode/linodego v1.47.0/go.mod h1:vyklQRzZUWhFVBZdYx4dcYJU/gG9yKB9VUcUs6ub0Lk= +github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de h1:9TO3cAIGXtEhnIaL+V+BEER86oLrvS+kWobKpbJuye0= +github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE= +github.com/linode/linodego v1.48.1 h1:Ojw1S+K5jJr1dggO8/H6r4FINxXnJbOU5GkbpaTfmhU= +github.com/linode/linodego v1.48.1/go.mod h1:fc3t60If8X+yZTFAebhCnNDFrhwQhq9HDU92WnBousQ= github.com/mackerelio/go-osstat v0.2.5 h1:+MqTbZUhoIt4m8qzkVoXUJg1EuifwlAJSk4Yl2GXh+o= github.com/mackerelio/go-osstat v0.2.5/go.mod h1:atxwWF+POUZcdtR1wnsUcQxTytoHG4uhl2AKKzrOajY= github.com/magiconair/properties v1.8.9 h1:nWcCbLq1N2v/cpNsy5WvQ37Fb+YElfq20WJ/a8RkpQM= @@ -216,6 +218,8 @@ github.com/mdlayher/socket v0.4.1/go.mod h1:cAqeGjoufqdxWkD7DkpyS+wcefOtmu5OQ8Ku github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= github.com/mitchellh/copystructure v1.1.2 h1:Th2TIvG1+6ma3e/0/bopBKohOTY7s4dA8V2q4EUcBJ0= github.com/mitchellh/copystructure v1.1.2/go.mod h1:EBArHfARyrSWO/+Wyr9zwEkc6XMFB9XyNgFNmRkZZU4= +github.com/mitchellh/go-wordwrap v1.0.1 h1:TLuKupo69TCn6TQSyGxwI1EblZZEsQ0vMlAFQflz0v0= +github.com/mitchellh/go-wordwrap v1.0.1/go.mod h1:R62XHJLzvMFRBbcrT7m7WgmE1eOyTSsCt+hzestvNj0= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod 
h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= @@ -248,22 +252,20 @@ github.com/petermattis/goid v0.0.0-20241211131331-93ee7e083c43 h1:ah1dvbqPMN5+oc github.com/petermattis/goid v0.0.0-20241211131331-93ee7e083c43/go.mod h1:pxMtw7cyUw6B2bRH0ZBANSPg+AoSud1I1iyJHI69jH4= github.com/pingcap/errors v0.11.4 h1:lFuQV/oaUMGcD2tqt+01ROSmJs75VG1ToEOkZIZ4nE4= github.com/pingcap/errors v0.11.4/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8= -github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y= -github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= +github.com/prometheus/client_golang v1.21.1 h1:DOvXXTqVzvkIewV/CDPFdejpMCGeMcbGCQ8YOmu+Ibk= +github.com/prometheus/client_golang v1.21.1/go.mod h1:U9NM32ykUErtVBxdvD3zfi+EuFkkaBvMb09mIfe0Zgg= github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= -github.com/prometheus/common v0.61.0 h1:3gv/GThfX0cV2lpO7gkTUwZru38mxevy90Bj8YFSRQQ= -github.com/prometheus/common v0.61.0/go.mod h1:zr29OCN/2BsJRaFwG8QOBr41D6kkchKbpeNH7pAjb/s= +github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io= +github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I= github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= -github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= @@ -315,8 +317,8 @@ github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8 github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75 h1:6fotK7otjonDflCTK0BCfls4SPy3NcCVb5dqqmbRknE= github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75/go.mod h1:KO6IkyS8Y3j8OdNO85qEYBsRPuteD+YciPomcXdrMnk= -github.com/vishvananda/netlink v1.3.1-0.20241022031324-976bd8de7d81 h1:9fkQcQYvtTr9ayFXuMfDMVuDt4+BYG9FwsGLnrBde0M= -github.com/vishvananda/netlink v1.3.1-0.20241022031324-976bd8de7d81/go.mod h1:i6NetklAujEcC6fK0JPjT8qSwWyO0HLn4UKG+hGqeJs= +github.com/vishvananda/netlink v1.3.1-0.20250221194427-0af32151e72b h1:hYWtmuzlR0jpWu+ljWfPMi7oNiZ9x/D3GbBqgZTOhyI= +github.com/vishvananda/netlink 
v1.3.1-0.20250221194427-0af32151e72b/go.mod h1:i6NetklAujEcC6fK0JPjT8qSwWyO0HLn4UKG+hGqeJs= github.com/vishvananda/netns v0.0.4/go.mod h1:SpkAiCQRtJ6TvvxPnOSyH3BMl6unz3xZlaprSwhNNJM= github.com/vishvananda/netns v0.0.5 h1:DfiHV+j8bA32MFM7bfEunvT8IAqQ/NzSJHtcmW5zdEY= github.com/vishvananda/netns v0.0.5/go.mod h1:SpkAiCQRtJ6TvvxPnOSyH3BMl6unz3xZlaprSwhNNJM= @@ -382,12 +384,10 @@ golang.org/x/crypto v0.0.0-20190422183909-d864b10871cd/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200414173820-0848c9571904/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.32.0 h1:euUpcYgM8WcP71gNpTqQCn6rC2t6ULUPiOzfWaXVVfc= -golang.org/x/crypto v0.32.0/go.mod h1:ZnnJkOaASj8g0AjIduWNlq2NRxL0PlBrbKVyZ6V/Ugc= +golang.org/x/crypto v0.35.0 h1:b15kiHdrGCHrP6LvwaQ3c03kgNhhiMgvlhxHQhmg2Xs= +golang.org/x/crypto v0.35.0/go.mod h1:dy7dXNW32cAb/6/PRuTNsix8T+vJAqvuIy5Bli/x0YQ= golang.org/x/exp v0.0.0-20241215155358-4a5509556b9e h1:4qufH0hlUYs6AO6XmZC3GqfDPGSXHVXUFR6OND+iJX4= golang.org/x/exp v0.0.0-20241215155358-4a5509556b9e/go.mod h1:qj5a5QZpwLU2NLQudwIN5koi3beDhSAlJwa67PuM98c= -golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= @@ -399,16 +399,16 @@ golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.34.0 h1:Mb7Mrk043xzHgnRM88suvJFwzVrRfHEHJEl5/71CKw0= -golang.org/x/net v0.34.0/go.mod h1:di0qlW3YNM5oh6GqDGQr92MyTozJPmybPK4Ev/Gm31k= -golang.org/x/oauth2 v0.25.0 h1:CY4y7XT9v0cRI9oupztF8AgiIu99L/ksR/Xp/6jrZ70= -golang.org/x/oauth2 v0.25.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/net v0.36.0 h1:vWF2fRbw4qslQsQzgFqZff+BItCvGFQqKzKIzx1rmoA= +golang.org/x/net v0.36.0/go.mod h1:bFmbeoIPfrw4sMHNhb4J9f6+tPziuGjq7Jk/38fxi1I= +golang.org/x/oauth2 v0.27.0 h1:da9Vo7/tDv5RH/7nZDz1eMGS/q1Vv1N/7FCrBhI9I3M= +golang.org/x/oauth2 v0.27.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ= -golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.11.0 h1:GGz8+XQP4FvTTrjZPzNKTMFtSXH80RAzG+5ghFPgK9w= +golang.org/x/sync v0.11.0/go.mod 
h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -420,21 +420,20 @@ golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU= -golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.30.0 h1:QjkSwP/36a20jFYWkSue1YwXzLmsV5Gfq7Eiy72C1uc= +golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.28.0 h1:/Ts8HFuMR2E6IP/jlo7QVLZHggjKQbhu/7H0LJFr3Gg= -golang.org/x/term v0.28.0/go.mod h1:Sw/lC2IAUZ92udQNf3WodGtn4k/XoLyZoh8v/8uiwek= +golang.org/x/term v0.29.0 h1:L6pJp37ocefwRRtYPKSWOWzOtWSxVajvz2ldH/xi3iU= +golang.org/x/term v0.29.0/go.mod h1:6bl4lRlvVuDgSf3179VpIxBF0o10JUpXWOnI7nErv7s= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= -golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= +golang.org/x/text v0.22.0 h1:bofq7m3/HAFvbF51jz3Q9wLg3jkvSPuiZu/pD1XwgtM= +golang.org/x/text v0.22.0/go.mod h1:YRoo4H8PVmsu+E3Ou7cqLVH8oXWIHVoX0jqUWALQhfY= golang.org/x/time v0.8.0 h1:9i3RxcPv3PZnitoVGMPDKZSq1xW1gK1Xy3ArNOGZfEg= golang.org/x/time v0.8.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= @@ -454,8 +453,8 @@ google.golang.org/genproto/googleapis/rpc v0.0.0-20241209162323-e6fa225c2576 h1: google.golang.org/genproto/googleapis/rpc v0.0.0-20241209162323-e6fa225c2576/go.mod h1:5uTbfoYQed2U9p3KIj2/Zzm02PYhndfdmML0qC3q3FU= google.golang.org/grpc v1.69.0 h1:quSiOM1GJPmPH5XtU+BCoVXcDVJJAzNcoyfC2cCjGkI= google.golang.org/grpc v1.69.0/go.mod h1:vyjdE6jLBI76dgpDojsFGNaHlxdjXN9ghpnd2o7JGZ4= -google.golang.org/protobuf v1.35.2 h1:8Ar7bF+apOIoThw1EdZl0p1oWvMqTHmpA2fRTyZO8io= -google.golang.org/protobuf v1.35.2/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +google.golang.org/protobuf v1.36.1 h1:yBPeRvTftaleIgM3PZ/WBIZ7XM/eEYAaEyCwvyjq/gk= +google.golang.org/protobuf v1.36.1/go.mod 
h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= @@ -472,36 +471,40 @@ gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -k8s.io/api v0.32.1 h1:f562zw9cy+GvXzXf0CKlVQ7yHJVYzLfL6JAS4kOAaOc= -k8s.io/api v0.32.1/go.mod h1:/Yi/BqkuueW1BgpoePYBRdDYfjPF5sgTr5+YqDZra5k= -k8s.io/apiextensions-apiserver v0.30.2 h1:l7Eue2t6QiLHErfn2vwK4KgF4NeDgjQkCXtEbOocKIE= -k8s.io/apiextensions-apiserver v0.30.2/go.mod h1:lsJFLYyK40iguuinsb3nt+Sj6CmodSI4ACDLep1rgjw= -k8s.io/apimachinery v0.32.1 h1:683ENpaCBjma4CYqsmZyhEzrGz6cjn1MY/X2jB2hkZs= -k8s.io/apimachinery v0.32.1/go.mod h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE= -k8s.io/apiserver v0.32.1 h1:oo0OozRos66WFq87Zc5tclUX2r0mymoVHRq8JmR7Aak= -k8s.io/apiserver v0.32.1/go.mod h1:UcB9tWjBY7aryeI5zAgzVJB/6k7E97bkr1RgqDz0jPw= -k8s.io/client-go v0.32.1 h1:otM0AxdhdBIaQh7l1Q0jQpmo7WOFIk5FFa4bg6YMdUU= -k8s.io/client-go v0.32.1/go.mod h1:aTTKZY7MdxUaJ/KiUs8D+GssR9zJZi77ZqtzcGXIiDg= -k8s.io/cloud-provider v0.32.1 h1:74rRhnfca3o4CsjjnIp/C3ARVuSmyNsxgWPtH0yc9Z0= -k8s.io/cloud-provider v0.32.1/go.mod h1:GECSanFT+EeZ/ToX3xlasjETzMUI+VFu92zHUDUsGHw= -k8s.io/component-base v0.32.1 h1:/5IfJ0dHIKBWysGV0yKTFfacZ5yNV1sulPh3ilJjRZk= -k8s.io/component-base v0.32.1/go.mod h1:j1iMMHi/sqAHeG5z+O9BFNCF698a1u0186zkjMZQ28w= -k8s.io/component-helpers v0.32.1 h1:TwdsSM1vW9GjnfX18lkrZbwE5G9psCIS2/rhenTDXd8= -k8s.io/component-helpers v0.32.1/go.mod h1:1JT1Ei3FD29yFQ18F3laj1WyvxYdHIhyxx6adKMFQXI= -k8s.io/controller-manager v0.32.1 h1:z3oQp1O5l0cSzM/MKf8V4olhJ9TmnELoJRPcV/v1s+Y= -k8s.io/controller-manager v0.32.1/go.mod h1:dVA1UZPbqHH4hEhrrnLvQ4d5qVQCklNB8GEzYV59v/4= +k8s.io/api v0.32.3 h1:Hw7KqxRusq+6QSplE3NYG4MBxZw1BZnq4aP4cJVINls= +k8s.io/api v0.32.3/go.mod h1:2wEDTXADtm/HA7CCMD8D8bK4yuBUptzaRhYcYEEYA3k= +k8s.io/apiextensions-apiserver v0.32.0 h1:S0Xlqt51qzzqjKPxfgX1xh4HBZE+p8KKBq+k2SWNOE0= +k8s.io/apiextensions-apiserver v0.32.0/go.mod h1:86hblMvN5yxMvZrZFX2OhIHAuFIMJIZ19bTvzkP+Fmw= +k8s.io/apimachinery v0.32.3 h1:JmDuDarhDmA/Li7j3aPrwhpNBA94Nvk5zLeOge9HH1U= +k8s.io/apimachinery v0.32.3/go.mod h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE= +k8s.io/apiserver v0.32.3 h1:kOw2KBuHOA+wetX1MkmrxgBr648ksz653j26ESuWNY8= +k8s.io/apiserver v0.32.3/go.mod h1:q1x9B8E/WzShF49wh3ADOh6muSfpmFL0I2t+TG0Zdgc= +k8s.io/client-go v0.32.3 h1:RKPVltzopkSgHS7aS98QdscAgtgah/+zmpAogooIqVU= +k8s.io/client-go v0.32.3/go.mod h1:3v0+3k4IcT9bXTc4V2rt+d2ZPPG700Xy6Oi0Gdl2PaY= +k8s.io/cloud-provider v0.32.3 h1:WC7KhWrqXsU4b0E4tjS+nBectGiJbr1wuc1TpWXvtZM= +k8s.io/cloud-provider v0.32.3/go.mod h1:/fwBfgRPuh16n8vLHT+PPT+Bc4LAEaJYj38opO2wsYY= +k8s.io/component-base v0.32.3 h1:98WJvvMs3QZ2LYHBzvltFSeJjEx7t5+8s71P7M74u8k= +k8s.io/component-base v0.32.3/go.mod h1:LWi9cR+yPAv7cu2X9rZanTiFKB2kHA+JjmhkKjCZRpI= +k8s.io/component-helpers v0.32.3 h1:9veHpOGTPLluqU4hAu5IPOwkOIZiGAJUhHndfVc5FT4= +k8s.io/component-helpers v0.32.3/go.mod h1:utTBXk8lhkJewBKNuNf32Xl3KT/0VV19DmiXU/SV4Ao= +k8s.io/controller-manager v0.32.3 
h1:jBxZnQ24k6IMeWLyxWZmpa3QVS7ww+osAIzaUY/jqyc= +k8s.io/controller-manager v0.32.3/go.mod h1:out1L3DZjE/p7JG0MoMMIaQGWIkt3c+pKaswqSHgKsI= k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= -k8s.io/kms v0.32.1 h1:TW6cswRI/fawoQRFGWLmEceO37rZXupdoRdmO019jCc= -k8s.io/kms v0.32.1/go.mod h1:Bk2evz/Yvk0oVrvm4MvZbgq8BD34Ksxs2SRHn4/UiOM= +k8s.io/kms v0.32.3 h1:HhHw5+pRCzEJp3oFFJ1q5W2N6gAI7YkUg4ay4Z0dgwM= +k8s.io/kms v0.32.3/go.mod h1:Bk2evz/Yvk0oVrvm4MvZbgq8BD34Ksxs2SRHn4/UiOM= k8s.io/kube-openapi v0.0.0-20241212222426-2c72e554b1e7 h1:hcha5B1kVACrLujCKLbr8XWMxCxzQx42DY8QKYJrDLg= k8s.io/kube-openapi v0.0.0-20241212222426-2c72e554b1e7/go.mod h1:GewRfANuJ70iYzvn+i4lezLDAFzvjxZYK1gn1lWcfas= k8s.io/utils v0.0.0-20241210054802-24370beab758 h1:sdbE21q2nlQtFh65saZY+rRM6x6aJJI8IUa1AmH/qa0= k8s.io/utils v0.0.0-20241210054802-24370beab758/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.1 h1:uOuSLOMBWkJH0TWa9X6l+mj5nZdm6Ay6Bli8HL8rNfk= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.1/go.mod h1:Ve9uj1L+deCXFrPOk1LpFXqTg7LCFzFso6PA48q/XZw= +sigs.k8s.io/gateway-api v1.2.1 h1:fZZ/+RyRb+Y5tGkwxFKuYuSRQHu9dZtbjenblleOLHM= +sigs.k8s.io/gateway-api v1.2.1/go.mod h1:EpNfEXNjiYfUJypf0eZ0P5iXA9ekSGWaS1WgPaM42X0= sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 h1:gBQPwqORJ8d8/YNZWEjoZs7npUVDpVXUUOFfW6CgAqE= sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg= +sigs.k8s.io/mcs-api v0.1.1-0.20250116162235-62ede9a032dc h1:oQrn1nrTacXiaXEYg+0TozPznSDIHFl2U/KZ5UFiYT8= +sigs.k8s.io/mcs-api v0.1.1-0.20250116162235-62ede9a032dc/go.mod h1:Uicqc5FnWP4dco2y7+AEg2mzNN20mVX1TDB3aDfmvhc= sigs.k8s.io/structured-merge-diff/v4 v4.5.0 h1:nbCitCK2hfnhyiKo6uf2HxUPTCodY6Qaf85SbDIaMBk= sigs.k8s.io/structured-merge-diff/v4 v4.5.0/go.mod h1:N8f93tFZh9U6vpxwRArLiikrE5/2tiu1w1AGfACIGE4= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= diff --git a/main.go b/main.go index 6577277c..f11a2614 100644 --- a/main.go +++ b/main.go @@ -7,10 +7,7 @@ import ( "net" "os" - "k8s.io/component-base/logs" - - "github.com/linode/linode-cloud-controller-manager/cloud/linode" - "github.com/linode/linode-cloud-controller-manager/sentry" + "github.com/linode/linodego" "github.com/spf13/pflag" cloudprovider "k8s.io/cloud-provider" "k8s.io/cloud-provider/app" @@ -18,8 +15,12 @@ import ( "k8s.io/cloud-provider/names" "k8s.io/cloud-provider/options" utilflag "k8s.io/component-base/cli/flag" + "k8s.io/component-base/logs" "k8s.io/klog/v2" + "github.com/linode/linode-cloud-controller-manager/cloud/linode" + "github.com/linode/linode-cloud-controller-manager/sentry" + _ "k8s.io/component-base/metrics/prometheus/clientgo" // for client metric registration _ "k8s.io/component-base/metrics/prometheus/version" // for version metric registration ) @@ -40,30 +41,30 @@ func initializeSentry() { ) if dsn, ok = os.LookupEnv(sentryDSNVariable); !ok { - fmt.Printf("%s not set, not initializing Sentry\n", sentryDSNVariable) + klog.Errorf("%s not set, not initializing Sentry\n", sentryDSNVariable) return } if environment, ok = os.LookupEnv(sentryEnvironmentVariable); !ok { - fmt.Printf("%s not set, not initializing Sentry\n", sentryEnvironmentVariable) + klog.Errorf("%s not set, not initializing Sentry\n", sentryEnvironmentVariable) return } if release, ok = os.LookupEnv(sentryReleaseVariable); !ok { - fmt.Printf("%s 
not set, defaulting to unknown", sentryReleaseVariable) + klog.Infof("%s not set, defaulting to unknown", sentryReleaseVariable) release = "unknown" } if err := sentry.Initialize(dsn, environment, release); err != nil { - fmt.Printf("error initializing sentry: %s\n", err.Error()) + klog.Errorf("error initializing sentry: %s\n", err.Error()) return } - fmt.Print("Sentry successfully initialized\n") + klog.Infoln("Sentry successfully initialized") } func main() { - fmt.Printf("Linode Cloud Controller Manager starting up\n") + klog.Infoln("Linode Cloud Controller Manager starting up") initializeSentry() @@ -84,10 +85,14 @@ func main() { command.Flags().BoolVar(&linode.Options.EnableTokenHealthChecker, "enable-token-health-checker", false, "enables Linode API token health checker") command.Flags().StringVar(&linode.Options.VPCName, "vpc-name", "", "[deprecated: use vpc-names instead] vpc name whose routes will be managed by route-controller") command.Flags().StringVar(&linode.Options.VPCNames, "vpc-names", "", "comma separated vpc names whose routes will be managed by route-controller") + command.Flags().StringVar(&linode.Options.SubnetNames, "subnet-names", "default", "comma separated subnet names whose routes will be managed by route-controller (requires vpc-names flag to also be set)") command.Flags().StringVar(&linode.Options.LoadBalancerType, "load-balancer-type", "nodebalancer", "configures which type of load-balancing to use for LoadBalancer Services (options: nodebalancer, cilium-bgp)") command.Flags().StringVar(&linode.Options.BGPNodeSelector, "bgp-node-selector", "", "node selector to use to perform shared IP fail-over with BGP (e.g. cilium-bgp-peering=true") command.Flags().StringVar(&linode.Options.IpHolderSuffix, "ip-holder-suffix", "", "suffix to append to the ip holder name when using shared IP fail-over with BGP (e.g. 
ip-holder-suffix=my-cluster-name") + command.Flags().StringVar(&linode.Options.DefaultNBType, "default-nodebalancer-type", string(linodego.NBTypeCommon), "default type of NodeBalancer to create (options: common, premium)") + command.Flags().StringVar(&linode.Options.NodeBalancerBackendIPv4Subnet, "nodebalancer-backend-ipv4-subnet", "", "ipv4 subnet to use for NodeBalancer backends") command.Flags().StringSliceVar(&linode.Options.NodeBalancerTags, "nodebalancer-tags", []string{}, "Linode tags to apply to all NodeBalancers") + command.Flags().BoolVar(&linode.Options.EnableIPv6ForLoadBalancers, "enable-ipv6-for-loadbalancers", false, "set both IPv4 and IPv6 addresses for all LoadBalancer services (when disabled, only IPv4 is used)") // Set static flags command.Flags().VisitAll(func(fl *pflag.Flag) { diff --git a/sentry/sentry_test.go b/sentry/sentry_test.go index b26dc9d7..00121110 100644 --- a/sentry/sentry_test.go +++ b/sentry/sentry_test.go @@ -6,6 +6,7 @@ import ( "github.com/getsentry/sentry-go" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestInitialize(t *testing.T) { @@ -48,7 +49,7 @@ func TestInitialize(t *testing.T) { if tt.wantErr { assert.Error(t, err) } else { - assert.NoError(t, err) + require.NoError(t, err) assert.True(t, initialized) } }) @@ -60,7 +61,7 @@ func TestSetHubOnContext(t *testing.T) { initialized = false _ = Initialize("https://test@sentry.io/123", "test", "1.0.0") - ctx := context.Background() + ctx := t.Context() newCtx := SetHubOnContext(ctx) assert.True(t, sentry.HasHubOnContext(newCtx)) @@ -77,7 +78,7 @@ func TestGetHubFromContext(t *testing.T) { { name: "valid hub in context", setupFunc: func() context.Context { - ctx := context.Background() + ctx := t.Context() return SetHubOnContext(ctx) }, initialized: true, @@ -86,7 +87,7 @@ func TestGetHubFromContext(t *testing.T) { { name: "no hub in context", setupFunc: func() context.Context { - return context.Background() + return t.Context() }, initialized: true, wantNil: true, @@ -94,7 +95,7 @@ func TestGetHubFromContext(t *testing.T) { { name: "sentry not initialized", setupFunc: func() context.Context { - return context.Background() + return t.Context() }, initialized: false, wantNil: true, @@ -135,7 +136,7 @@ func TestSetTag(t *testing.T) { { name: "set tag with valid hub", setupFunc: func() context.Context { - return SetHubOnContext(context.Background()) + return SetHubOnContext(t.Context()) }, key: "test-key", value: "test-value", @@ -143,7 +144,7 @@ func TestSetTag(t *testing.T) { { name: "set tag with no hub", setupFunc: func() context.Context { - return context.Background() + return t.Context() }, key: "test-key", value: "test-value", @@ -172,14 +173,14 @@ func TestCaptureError(t *testing.T) { { name: "capture error with valid hub", setupFunc: func() context.Context { - return SetHubOnContext(context.Background()) + return SetHubOnContext(t.Context()) }, err: assert.AnError, }, { name: "capture error with no hub", setupFunc: func() context.Context { - return context.Background() + return t.Context() }, err: assert.AnError, },