diff --git a/.github/workflows/e2e-nightly.yml b/.github/workflows/e2e-nightly.yml index ad433a7b4..e121f9607 100644 --- a/.github/workflows/e2e-nightly.yml +++ b/.github/workflows/e2e-nightly.yml @@ -138,7 +138,7 @@ jobs: - name: Run e2e test run: | export AWS_B64ENCODED_CREDENTIALS=$(clusterawsadm bootstrap credentials encode-as-profile) - make e2e TEST_NAME="${{ matrix.e2e-suite }}" E2E_CONF_FILE="$(pwd)/e2e/config/aws.yaml" + make e2e-aws TEST_NAME="${{ matrix.e2e-suite }}" - name: Archive artifacts if: failure() diff --git a/.github/workflows/go.yml b/.github/workflows/go.yml index 2076c934c..f123905c5 100644 --- a/.github/workflows/go.yml +++ b/.github/workflows/go.yml @@ -186,23 +186,6 @@ jobs: run: | make -C inttest ${{ matrix.smoke-suite }} - - capi-smokes-on-different-k0s-versions: - name: CAPI Smokes Matrix - needs: [build, unittest] - strategy: - fail-fast: false - matrix: - smoke-suite: - - check-capi-remote-machine - - check-capi-remote-machine-template - k0s-version: [ "v1.28.11", "v1.29.6", "v1.30.2" ] - - uses: ./.github/workflows/capi-smoke-tests.yml - with: - smoke-suite: ${{ matrix.smoke-suite }} - k0s-version: ${{ matrix.k0s-version }} - capi-smokes: name: Cluster API smoke tests needs: [build, unittest] @@ -223,7 +206,6 @@ jobs: - check-capi-controlplane-docker-worker - check-capi-docker-machine-change-args - check-capi-docker-machine-change-template - - check-capi-remote-machine-template-update - check-capi-docker-machine-template-update - check-capi-docker-machine-template-update-recreate - check-capi-docker-machine-template-update-recreate-single @@ -285,6 +267,6 @@ jobs: if: failure() uses: actions/upload-artifact@v4.3.2 with: - name: e2e-artifacts + name: e2e-artifacts-${{ matrix.e2e-suite }} path: _artifacts if-no-files-found: ignore diff --git a/.gitignore b/.gitignore index b7dfe26dd..93f5f4470 100644 --- a/.gitignore +++ b/.gitignore @@ -42,4 +42,5 @@ __debug_bin* _artifacts # E2E test templates -e2e/data/infrastructure-docker/**/cluster-template*.yaml \ No newline at end of file +e2e/data/infrastructure-docker/**/cluster-template*.yaml +e2e/data/infrastructure-k0smotron/**/cluster-template*.yaml \ No newline at end of file diff --git a/Makefile b/Makefile index 31331356c..b25c3d1d5 100644 --- a/Makefile +++ b/Makefile @@ -15,7 +15,8 @@ ENVTEST ?= $(LOCALBIN)/setup-envtest CRDOC ?= $(LOCALBIN)/crdoc ## e2e configuration -E2E_CONF_FILE ?= $(shell pwd)/e2e/config/docker.yaml +E2E_CONF_FILE ?= $(shell pwd)/e2e/config.yaml +E2E_INFRASTRUCTURE_PROVIDER ?= k0sproject-k0smotron SKIP_RESOURCE_CLEANUP ?= false # Artifacts folder generated for e2e tests ARTIFACTS ?= $(shell pwd)/_artifacts @@ -116,7 +117,9 @@ vet: ## Run go vet against code. 
test: $(ENVTEST) KUBEBUILDER_ASSETS="$(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) --bin-dir $(LOCALBIN) -p path)" go test $(GO_TEST_DIRS) -run '$(TEST_NAME)' -coverprofile cover.out +## Template by infrastructure provider for e2e tests DOCKER_TEMPLATES := e2e/data/infrastructure-docker +K0SMOTRON_TEMPLATES := e2e/data/infrastructure-k0smotron .PHONY: generate-e2e-templates-main generate-e2e-templates-main: $(KUSTOMIZE) @@ -127,6 +130,14 @@ generate-e2e-templates-main: $(KUSTOMIZE) $(KUSTOMIZE) build $(DOCKER_TEMPLATES)/main/cluster-template-machinedeployment --load-restrictor LoadRestrictionsNone > $(DOCKER_TEMPLATES)/main/cluster-template-machinedeployment.yaml $(KUSTOMIZE) build $(DOCKER_TEMPLATES)/main/cluster-template-remote-hcp --load-restrictor LoadRestrictionsNone > $(DOCKER_TEMPLATES)/main/cluster-template-remote-hcp.yaml $(KUSTOMIZE) build $(DOCKER_TEMPLATES)/main/cluster-template-ingress --load-restrictor LoadRestrictionsNone > $(DOCKER_TEMPLATES)/main/cluster-template-ingress.yaml + $(KUSTOMIZE) build $(K0SMOTRON_TEMPLATES)/main/cluster-template --load-restrictor LoadRestrictionsNone > $(K0SMOTRON_TEMPLATES)/main/cluster-template.yaml + $(KUSTOMIZE) build $(K0SMOTRON_TEMPLATES)/main/cluster-template-webhook-recreate-in-single-mode --load-restrictor LoadRestrictionsNone > $(K0SMOTRON_TEMPLATES)/main/cluster-template-webhook-recreate-in-single-mode.yaml + $(KUSTOMIZE) build $(K0SMOTRON_TEMPLATES)/main/cluster-template-webhook-k0s-not-compatible --load-restrictor LoadRestrictionsNone > $(K0SMOTRON_TEMPLATES)/main/cluster-template-webhook-k0s-not-compatible.yaml + $(KUSTOMIZE) build $(K0SMOTRON_TEMPLATES)/main/cluster-template-kcp-remediation --load-restrictor LoadRestrictionsNone > $(K0SMOTRON_TEMPLATES)/main/cluster-template-kcp-remediation.yaml + $(KUSTOMIZE) build $(K0SMOTRON_TEMPLATES)/main/cluster-template-remote-hcp --load-restrictor LoadRestrictionsNone > $(K0SMOTRON_TEMPLATES)/main/cluster-template-remote-hcp.yaml + $(KUSTOMIZE) build $(K0SMOTRON_TEMPLATES)/main/cluster-template-ingress --load-restrictor LoadRestrictionsNone > $(K0SMOTRON_TEMPLATES)/main/cluster-template-ingress.yaml + $(KUSTOMIZE) build $(K0SMOTRON_TEMPLATES)/main/cluster-template-machinedeployment --load-restrictor LoadRestrictionsNone > $(K0SMOTRON_TEMPLATES)/main/cluster-template-machinedeployment.yaml + e2e: generate-e2e-templates-main @@ -135,6 +146,7 @@ e2e: generate-e2e-templates-main -artifacts-folder="$(ARTIFACTS)" \ -config="$(E2E_CONF_FILE)" \ -skip-resource-cleanup=$(SKIP_RESOURCE_CLEANUP) \ + -infrastructure-provider=$(E2E_INFRASTRUCTURE_PROVIDER) \ -timeout=30m e2e-aws: @@ -143,7 +155,7 @@ e2e-aws: @[ -n "$$AWS_REGION" ] || (echo "AWS_REGION not defined"; exit 1) @[ -n "$$AWS_B64ENCODED_CREDENTIALS" ] || (echo "AWS_B64ENCODED_CREDENTIALS not defined"; exit 1) @[ -n "$$SSH_PUBLIC_KEY" ] || (echo "SSH_PUBLIC_KEY not defined"; exit 1) - $(MAKE) e2e TEST_NAME="${TEST_NAME}" E2E_CONF_FILE="$(shell pwd)/e2e/config/aws.yaml" + $(MAKE) e2e TEST_NAME="${TEST_NAME}" E2E_INFRASTRUCTURE_PROVIDER=aws ##@ Build diff --git a/docs/contributing/release-process.md b/docs/contributing/release-process.md index 2d5654d77..67c8d1330 100644 --- a/docs/contributing/release-process.md +++ b/docs/contributing/release-process.md @@ -57,4 +57,4 @@ You can verify the documentation at https://docs.k0smotron.io/ after the workflo If you are making a new **major** or **minor** release, after publishing the release, update the E2E upgrade test to reflect the new release version: - Add new release in 
`k0smotronMinorVersionsToCheckUpgrades` in [`e2e/k0smotron_upgrade_test.go`](https://github.com/k0sproject/k0smotron/blob/main/e2e/k0smotron_upgrade_test.go). -- Add the new release entry under the `k0sproject-k0smotron` provider in [`e2e/config/docker.yaml`](https://github.com/k0sproject/k0smotron/blob/main/e2e/config/docker.yaml) (including matching `control-plane-components.yaml` and `bootstrap-components.yaml` URLs). \ No newline at end of file +- Add the new release entry under the `k0sproject-k0smotron` provider in [`e2e/config.yaml`](https://github.com/k0sproject/k0smotron/blob/main/e2e/config.yaml) (including matching `control-plane-components.yaml` and `bootstrap-components.yaml` URLs). \ No newline at end of file diff --git a/e2e/admission_webhook_test.go b/e2e/admission_webhook_test.go index d5aa7aa94..14f5a115b 100644 --- a/e2e/admission_webhook_test.go +++ b/e2e/admission_webhook_test.go @@ -55,10 +55,10 @@ func admissionWebhookRecreateStrategyInSingleModeSpec(t *testing.T) { Namespace: namespace.Name, ClusterName: clusterName, KubernetesVersion: e2eConfig.MustGetVariable(KubernetesVersion), - ControlPlaneMachineCount: ptr.To[int64](3), - // TODO: make infra provider configurable - InfrastructureProvider: "docker", - LogFolder: filepath.Join(artifactFolder, "clusters", bootstrapClusterProxy.GetName()), + ControlPlaneMachineCount: ptr.To(int64(controlPlaneMachineCount)), + WorkerMachineCount: ptr.To(int64(workerMachineCount)), + InfrastructureProvider: infrastructureProvider, + LogFolder: filepath.Join(artifactFolder, "clusters", bootstrapClusterProxy.GetName()), ClusterctlVariables: map[string]string{ "CLUSTER_NAME": clusterName, "NAMESPACE": namespace.Name, @@ -80,18 +80,16 @@ func admissionWebhookK0sVersionNotCompatibleSpec(t *testing.T) { clusterName := fmt.Sprintf("%s-%s", testName, capiutil.RandomString(6)) workloadClusterTemplate := clusterctl.ConfigCluster(ctx, clusterctl.ConfigClusterInput{ - ClusterctlConfigPath: clusterctlConfigPath, - KubeconfigPath: bootstrapClusterProxy.GetKubeconfigPath(), - // select cluster templates - Flavor: "webhook-k0s-not-compatible", - + ClusterctlConfigPath: clusterctlConfigPath, + KubeconfigPath: bootstrapClusterProxy.GetKubeconfigPath(), + Flavor: "webhook-k0s-not-compatible", Namespace: namespace.Name, ClusterName: clusterName, KubernetesVersion: e2eConfig.MustGetVariable(KubernetesVersion), - ControlPlaneMachineCount: ptr.To[int64](3), - // TODO: make infra provider configurable - InfrastructureProvider: "docker", - LogFolder: filepath.Join(artifactFolder, "clusters", bootstrapClusterProxy.GetName()), + ControlPlaneMachineCount: ptr.To(int64(controlPlaneMachineCount)), + WorkerMachineCount: ptr.To(int64(workerMachineCount)), + InfrastructureProvider: infrastructureProvider, + LogFolder: filepath.Join(artifactFolder, "clusters", bootstrapClusterProxy.GetName()), ClusterctlVariables: map[string]string{ "CLUSTER_NAME": clusterName, "NAMESPACE": namespace.Name, diff --git a/e2e/config/docker.yaml b/e2e/config.yaml similarity index 59% rename from e2e/config/docker.yaml rename to e2e/config.yaml index 182a1edc1..2e867f870 100644 --- a/e2e/config/docker.yaml +++ b/e2e/config.yaml @@ -3,7 +3,7 @@ # - cluster-api # - bootstrap k0smotron # - control-plane k0smotron -# - infrastructure docker +# - infrastructure docker, aws, k0sproject-k0smotron images: - name: quay.io/k0sproject/k0smotron:latest loadBehavior: mustLoad @@ -17,10 +17,67 @@ providers: type: url contract: v1beta1 files: - - sourcePath: "../data/shared/v1beta1/metadata.yaml" + - 
sourcePath: "./data/shared/v1beta1/metadata.yaml" replacements: - old: "imagePullPolicy: Always" new: "imagePullPolicy: IfNotPresent" + - name: k0sproject-k0smotron + type: InfrastructureProvider + versions: + - name: "{go://github.com/k0sproject/k0smotron@v1.5}" + value: https://github.com/k0sproject/k0smotron/releases/download/{go://github.com/k0sproject/k0smotron@v1.5}/infrastructure-components.yaml + type: url + contract: v1beta1 + files: + - sourcePath: "../metadata.yaml" + replacements: + - old: "imagePullPolicy: Always" + new: "imagePullPolicy: IfNotPresent" + - name: "{go://github.com/k0sproject/k0smotron@v1.6}" + value: https://github.com/k0sproject/k0smotron/releases/download/{go://github.com/k0sproject/k0smotron@v1.6}/infrastructure-components.yaml + type: url + contract: v1beta1 + files: + - sourcePath: "../metadata.yaml" + replacements: + - old: "imagePullPolicy: Always" + new: "imagePullPolicy: IfNotPresent" + - name: "{go://github.com/k0sproject/k0smotron@v1.7}" + value: https://github.com/k0sproject/k0smotron/releases/download/{go://github.com/k0sproject/k0smotron@v1.7}/infrastructure-components.yaml + type: url + contract: v1beta1 + files: + - sourcePath: "../metadata.yaml" + replacements: + - old: "imagePullPolicy: Always" + new: "imagePullPolicy: IfNotPresent" + - name: "{go://github.com/k0sproject/k0smotron@v1.8}" + value: https://github.com/k0sproject/k0smotron/releases/download/{go://github.com/k0sproject/k0smotron@v1.8}/infrastructure-components.yaml + type: url + contract: v1beta1 + files: + - sourcePath: "../metadata.yaml" + replacements: + - old: "imagePullPolicy: Always" + new: "imagePullPolicy: IfNotPresent" + - name: v1.9.99 # potentially next release. Manifest from source files (development) are used. + value: ../config/clusterapi/infrastructure + contract: v1beta1 + files: + - sourcePath: "../metadata.yaml" + replacements: + - old: "imagePullPolicy: Always" + new: "imagePullPolicy: IfNotPresent" + - old: "image: k0s/k0smotron:latest" + new: "image: quay.io/k0sproject/k0smotron:latest" # For local testing, this image needs to be built before run e2e by using `make docker-build` + files: + - sourcePath: "./data/infrastructure-k0smotron/main/cluster-template.yaml" + - sourcePath: "./data/infrastructure-k0smotron/main/cluster-template-kcp-remediation.yaml" + - sourcePath: "./data/infrastructure-k0smotron/main/cluster-template-webhook-recreate-in-single-mode.yaml" + - sourcePath: "./data/infrastructure-k0smotron/main/cluster-template-webhook-k0s-not-compatible.yaml" + - sourcePath: "./data/infrastructure-k0smotron/main/cluster-template-machinedeployment.yaml" + - sourcePath: "./data/infrastructure-k0smotron/main/cluster-template-remote-hcp.yaml" + - sourcePath: "./data/infrastructure-k0smotron/main/cluster-template-ingress.yaml" - name: docker type: InfrastructureProvider versions: @@ -29,18 +86,32 @@ providers: type: url contract: v1beta1 files: - - sourcePath: "../data/shared/v1beta1/metadata.yaml" + - sourcePath: "./data/shared/v1beta1/metadata.yaml" + replacements: + - old: "imagePullPolicy: Always" + new: "imagePullPolicy: IfNotPresent" + files: + - sourcePath: "./data/infrastructure-docker/main/cluster-template.yaml" + - sourcePath: "./data/infrastructure-docker/main/cluster-template-kcp-remediation.yaml" + - sourcePath: "./data/infrastructure-docker/main/cluster-template-webhook-recreate-in-single-mode.yaml" + - sourcePath: "./data/infrastructure-docker/main/cluster-template-webhook-k0s-not-compatible.yaml" + - sourcePath: 
"./data/infrastructure-docker/main/cluster-template-machinedeployment.yaml" + - sourcePath: "./data/infrastructure-docker/main/cluster-template-remote-hcp.yaml" + - sourcePath: "./data/infrastructure-docker/main/cluster-template-ingress.yaml" + - name: aws + type: InfrastructureProvider + versions: + - name: v2.9.0 + value: "https://github.com/kubernetes-sigs/cluster-api-provider-aws/releases/download/v2.9.0/infrastructure-components.yaml" + type: url + contract: v1beta1 + files: + - sourcePath: ".data.yaml" replacements: - old: "imagePullPolicy: Always" new: "imagePullPolicy: IfNotPresent" files: - - sourcePath: "../data/infrastructure-docker/main/cluster-template.yaml" - - sourcePath: "../data/infrastructure-docker/main/cluster-template-kcp-remediation.yaml" - - sourcePath: "../data/infrastructure-docker/main/cluster-template-webhook-recreate-in-single-mode.yaml" - - sourcePath: "../data/infrastructure-docker/main/cluster-template-webhook-k0s-not-compatible.yaml" - - sourcePath: "../data/infrastructure-docker/main/cluster-template-machinedeployment.yaml" - - sourcePath: "../data/infrastructure-docker/main/cluster-template-remote-hcp.yaml" - - sourcePath: "../data/infrastructure-docker/main/cluster-template-ingress.yaml" + - sourcePath: "./data/infrastructure-aws/cluster-template-ignition.yaml" - name: k0sproject-k0smotron type: ControlPlaneProvider versions: @@ -49,7 +120,7 @@ providers: type: url contract: v1beta1 files: - - sourcePath: "../../metadata.yaml" + - sourcePath: "../metadata.yaml" replacements: - old: "imagePullPolicy: Always" new: "imagePullPolicy: IfNotPresent" @@ -58,7 +129,7 @@ providers: type: url contract: v1beta1 files: - - sourcePath: "../../metadata.yaml" + - sourcePath: "../metadata.yaml" replacements: - old: "imagePullPolicy: Always" new: "imagePullPolicy: IfNotPresent" @@ -67,7 +138,7 @@ providers: type: url contract: v1beta1 files: - - sourcePath: "../../metadata.yaml" + - sourcePath: "../metadata.yaml" replacements: - old: "imagePullPolicy: Always" new: "imagePullPolicy: IfNotPresent" @@ -76,7 +147,7 @@ providers: type: url contract: v1beta1 files: - - sourcePath: "../../metadata.yaml" + - sourcePath: "../metadata.yaml" replacements: - old: "imagePullPolicy: Always" new: "imagePullPolicy: IfNotPresent" @@ -85,15 +156,15 @@ providers: type: url contract: v1beta1 files: - - sourcePath: "../../metadata.yaml" + - sourcePath: "../metadata.yaml" replacements: - old: "imagePullPolicy: Always" new: "imagePullPolicy: IfNotPresent" - name: v1.9.99 # potentially next release. Manifest from source files (development) are used. 
- value: ../../config/default + value: ../config/clusterapi/controlplane contract: v1beta1 files: - - sourcePath: "../../metadata.yaml" + - sourcePath: "../metadata.yaml" replacements: - old: "imagePullPolicy: Always" new: "imagePullPolicy: IfNotPresent" @@ -107,7 +178,7 @@ providers: type: url contract: v1beta1 files: - - sourcePath: "../../metadata.yaml" + - sourcePath: "../metadata.yaml" replacements: - old: "imagePullPolicy: Always" new: "imagePullPolicy: IfNotPresent" @@ -116,7 +187,7 @@ providers: type: url contract: v1beta1 files: - - sourcePath: "../../metadata.yaml" + - sourcePath: "../metadata.yaml" replacements: - old: "imagePullPolicy: Always" new: "imagePullPolicy: IfNotPresent" @@ -125,7 +196,7 @@ providers: type: url contract: v1beta1 files: - - sourcePath: "../../metadata.yaml" + - sourcePath: "../metadata.yaml" replacements: - old: "imagePullPolicy: Always" new: "imagePullPolicy: IfNotPresent" @@ -134,7 +205,7 @@ providers: type: url contract: v1beta1 files: - - sourcePath: "../../metadata.yaml" + - sourcePath: "../metadata.yaml" replacements: - old: "imagePullPolicy: Always" new: "imagePullPolicy: IfNotPresent" @@ -143,15 +214,15 @@ providers: type: url contract: v1beta1 files: - - sourcePath: "../../metadata.yaml" + - sourcePath: "../metadata.yaml" replacements: - old: "imagePullPolicy: Always" new: "imagePullPolicy: IfNotPresent" - name: v1.9.99 # potentially next release. Manifest from source files (development) are used. - value: ../../config/default + value: ../config/clusterapi/bootstrap contract: v1beta1 files: - - sourcePath: "../../metadata.yaml" + - sourcePath: "../metadata.yaml" replacements: - old: "imagePullPolicy: Always" new: "imagePullPolicy: IfNotPresent" @@ -161,8 +232,9 @@ providers: variables: KUBERNETES_VERSION_MANAGEMENT: "v1.30.0" KUBERNETES_VERSION: "v1.31.0" - KUBERNETES_VERSION_FIRST_UPGRADE_TO: "v1.30.2+k0s.0" - KUBERNETES_VERSION_SECOND_UPGRADE_TO: "v1.31.2+k0s.0" + K0S_VERSION: "v1.30.1+k0s.0" + K0S_VERSION_FIRST_UPGRADE_TO: "v1.30.2+k0s.0" + K0S_VERSION_SECOND_UPGRADE_TO: "v1.31.2+k0s.0" IP_FAMILY: "IPv4" KIND_IMAGE_VERSION: "v1.30.0" # Enabling the feature flags by setting the env variables. 
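The renamed variables above (K0S_VERSION, K0S_VERSION_FIRST_UPGRADE_TO, K0S_VERSION_SECOND_UPGRADE_TO) line up with the constants added to e2e/setup.go later in this diff, so tests resolve upgrade targets through the e2e config instead of hardcoding them. A minimal sketch, assuming the package-level constants from e2e/setup.go (the helper itself is hypothetical, shown only to illustrate how the renamed variables are consumed):

```go
package e2e

import clusterctl "sigs.k8s.io/cluster-api/test/framework/clusterctl"

// k0sUpgradeVersions resolves the initial and target k0s versions declared
// under `variables:` in e2e/config.yaml. Hypothetical helper for illustration;
// MustGetVariable is the accessor used elsewhere in this diff.
func k0sUpgradeVersions(cfg *clusterctl.E2EConfig) (initial, first, second string) {
	return cfg.MustGetVariable(K0sVersion), // "v1.30.1+k0s.0"
		cfg.MustGetVariable(K0sVersionFirstUpgradeTo), // "v1.30.2+k0s.0"
		cfg.MustGetVariable(K0sVersionSecondUpgradeTo) // "v1.31.2+k0s.0"
}
```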
@@ -183,7 +255,6 @@ intervals: default/wait-nodes-ready: ["10m", "10s"] default/wait-machine-remediation: ["5m", "10s"] default/wait-autoscaler: ["5m", "10s"] - ingress/wait-controllers: ["5m", "10s"] remote-hcp/wait-controllers: ["5m", "10s"] bootstrap/wait-deployment-available: ["3m", "10s"] node-drain/wait-deployment-available: ["3m", "10s"] @@ -193,6 +264,7 @@ intervals: workload-recreate-upgrade/wait-cluster: ["10m", "10s"] workload-recreate-upgrade/wait-control-plane: ["20m", "10s"] workload-recreate-upgrade/wait-worker-nodes: ["20m", "10s"] + workload-inplace-upgrade/wait-controllers: ["10m", "20s"] workload-inplace-upgrade/wait-cluster: ["10m", "10s"] workload-inplace-upgrade/wait-control-plane: ["20m", "10s"] workload-inplace-upgrade/wait-worker-nodes: ["20m", "10s"] diff --git a/e2e/config/aws.yaml b/e2e/config/aws.yaml deleted file mode 100644 index aca5db94c..000000000 --- a/e2e/config/aws.yaml +++ /dev/null @@ -1,164 +0,0 @@ ---- -# E2E test scenario using local dev images and manifests built from the source tree for following providers: -# - cluster-api -# - bootstrap k0smotron -# - control-plane k0smotron -# - infrastructure aws -images: - - name: quay.io/k0sproject/k0smotron:latest - loadBehavior: mustLoad - -providers: - - name: cluster-api - type: CoreProvider - versions: - - name: "{go://sigs.k8s.io/cluster-api@v1.10}" - value: https://github.com/kubernetes-sigs/cluster-api/releases/download/{go://sigs.k8s.io/cluster-api@v1.10}/core-components.yaml - type: url - contract: v1beta1 - files: - - sourcePath: "../data/shared/v1beta1/metadata.yaml" - replacements: - - old: "imagePullPolicy: Always" - new: "imagePullPolicy: IfNotPresent" - - name: aws - type: InfrastructureProvider - versions: - - name: v2.9.0 - value: "https://github.com/kubernetes-sigs/cluster-api-provider-aws/releases/download/v2.9.0/infrastructure-components.yaml" - type: url - contract: v1beta1 - files: - - sourcePath: "../data/shared/v1beta1_aws/metadata.yaml" - replacements: - - old: "imagePullPolicy: Always" - new: "imagePullPolicy: IfNotPresent" - files: - - sourcePath: "../data/infrastructure-aws/cluster-template-ignition.yaml" - - name: k0sproject-k0smotron - type: ControlPlaneProvider - versions: - - name: "{go://github.com/k0sproject/k0smotron@v1.4}" - value: https://github.com/k0sproject/k0smotron/releases/download/{go://github.com/k0sproject/k0smotron@v1.4}/control-plane-components.yaml - type: url - contract: v1beta1 - files: - - sourcePath: "../../metadata.yaml" - replacements: - - old: "imagePullPolicy: Always" - new: "imagePullPolicy: IfNotPresent" - - name: "{go://github.com/k0sproject/k0smotron@v1.5}" - value: https://github.com/k0sproject/k0smotron/releases/download/{go://github.com/k0sproject/k0smotron@v1.5}/control-plane-components.yaml - type: url - contract: v1beta1 - files: - - sourcePath: "../../metadata.yaml" - replacements: - - old: "imagePullPolicy: Always" - new: "imagePullPolicy: IfNotPresent" - - name: "{go://github.com/k0sproject/k0smotron@v1.6}" - value: https://github.com/k0sproject/k0smotron/releases/download/{go://github.com/k0sproject/k0smotron@v1.6}/control-plane-components.yaml - type: url - contract: v1beta1 - files: - - sourcePath: "../../metadata.yaml" - replacements: - - old: "imagePullPolicy: Always" - new: "imagePullPolicy: IfNotPresent" - - name: v1.9.99 # potentially next release. Manifest from source files (development) are used. 
- value: ../../config/default - contract: v1beta1 - files: - - sourcePath: "../../metadata.yaml" - replacements: - - old: "imagePullPolicy: Always" - new: "imagePullPolicy: IfNotPresent" - - old: "image: k0s/k0smotron:latest" - new: "image: quay.io/k0sproject/k0smotron:latest" # For local testing, this image needs to be built before run e2e by using `make docker-build` - - name: k0sproject-k0smotron - type: BootstrapProvider - versions: - - name: "{go://github.com/k0sproject/k0smotron@v1.4}" - value: https://github.com/k0sproject/k0smotron/releases/download/{go://github.com/k0sproject/k0smotron@v1.4}/bootstrap-components.yaml - type: url - contract: v1beta1 - files: - - sourcePath: "../../metadata.yaml" - replacements: - - old: "imagePullPolicy: Always" - new: "imagePullPolicy: IfNotPresent" - - name: "{go://github.com/k0sproject/k0smotron@v1.5}" - value: https://github.com/k0sproject/k0smotron/releases/download/{go://github.com/k0sproject/k0smotron@v1.5}/bootstrap-components.yaml - type: url - contract: v1beta1 - files: - - sourcePath: "../../metadata.yaml" - replacements: - - old: "imagePullPolicy: Always" - new: "imagePullPolicy: IfNotPresent" - - name: "{go://github.com/k0sproject/k0smotron@v1.6}" - value: https://github.com/k0sproject/k0smotron/releases/download/{go://github.com/k0sproject/k0smotron@v1.6}/bootstrap-components.yaml - type: url - contract: v1beta1 - files: - - sourcePath: "../../metadata.yaml" - replacements: - - old: "imagePullPolicy: Always" - new: "imagePullPolicy: IfNotPresent" - - name: v1.9.99 # potentially next release. Manifest from source files (development) are used. - value: ../../config/default - contract: v1beta1 - files: - - sourcePath: "../../metadata.yaml" - replacements: - - old: "imagePullPolicy: Always" - new: "imagePullPolicy: IfNotPresent" - - old: "image: k0s/k0smotron:latest" - new: "image: quay.io/k0sproject/k0smotron:latest" - -variables: - KUBERNETES_VERSION_MANAGEMENT: "v1.30.0" - KUBERNETES_VERSION: "v1.31.0" - KUBERNETES_VERSION_FIRST_UPGRADE_TO: "v1.30.2+k0s.0" - KUBERNETES_VERSION_SECOND_UPGRADE_TO: "v1.31.2+k0s.0" - IP_FAMILY: "IPv4" - KIND_IMAGE_VERSION: "v1.30.0" - # Enabling the feature flags by setting the env variables. - CLUSTER_TOPOLOGY: "true" - EXP_MACHINE_POOL: "true" - AWS_INSTANCE_TYPE: "t3.large" - # Following feature flags are required to use Ignition as bootstrap data format in CAPA. 
- EXP_KUBEADM_BOOTSTRAP_FORMAT_IGNITION: "true" - EXP_BOOTSTRAP_FORMAT_IGNITION: "true" - -intervals: - # The array is defined as [timeout, polling interval] - # copied from https://github.com/kubernetes-sigs/cluster-api/blob/main/test/e2e/config/docker.yaml - default/wait-controllers: ["3m", "10s"] - default/wait-cluster: ["5m", "10s"] - default/wait-control-plane: ["10m", "10s"] - default/wait-worker-nodes: ["10m", "10s"] - default/wait-machine-pool-nodes: ["10m", "10s"] - default/wait-delete-cluster: ["10m", "10s"] - default/wait-kube-proxy-upgrade: ["30m", "10s"] - default/wait-machine-pool-upgrade: ["30m", "10s"] - default/wait-nodes-ready: ["10m", "10s"] - default/wait-machine-remediation: ["5m", "10s"] - default/wait-autoscaler: ["5m", "10s"] - remote-hcp/wait-controllers: ["5m", "10s"] - bootstrap/wait-deployment-available: ["3m", "10s"] - node-drain/wait-deployment-available: ["3m", "10s"] - node-drain/wait-control-plane: ["15m", "10s"] - node-drain/wait-machine-deleted: ["2m", "10s"] - kcp-remediation/wait-machines: ["5m", "10s"] - workload-recreate-upgrade/wait-cluster: ["10m", "10s"] - workload-recreate-upgrade/wait-control-plane: ["20m", "10s"] - workload-recreate-upgrade/wait-worker-nodes: ["20m", "10s"] - workload-inplace-upgrade/wait-cluster: ["10m", "10s"] - workload-inplace-upgrade/wait-control-plane: ["20m", "10s"] - workload-inplace-upgrade/wait-worker-nodes: ["20m", "10s"] - machinedeployment/wait-cluster: ["20m", "10s"] - machinedeployment/wait-control-plane: ["20m", "10s"] - machinedeployment/wait-delete-cluster: ["20m", "10s"] - ignition/wait-controllers: ["10m", "10s"] - ignition/wait-machines: ["10m", "10s"] \ No newline at end of file diff --git a/e2e/controlplane_conditions_test.go b/e2e/controlplane_conditions_test.go index 6dc30b00a..00935afc0 100644 --- a/e2e/controlplane_conditions_test.go +++ b/e2e/controlplane_conditions_test.go @@ -58,12 +58,13 @@ func controlplaneConditionsSpec(t *testing.T) { workloadClusterTemplate := clusterctl.ConfigCluster(ctx, clusterctl.ConfigClusterInput{ ClusterctlConfigPath: clusterctlConfigPath, KubeconfigPath: bootstrapClusterProxy.GetKubeconfigPath(), - InfrastructureProvider: "docker", + InfrastructureProvider: infrastructureProvider, Flavor: "", Namespace: workloadClusterNamespace, ClusterName: workloadClusterName, KubernetesVersion: e2eConfig.MustGetVariable(KubernetesVersion), - ControlPlaneMachineCount: ptr.To[int64](1), + ControlPlaneMachineCount: ptr.To(int64(controlPlaneMachineCount)), + WorkerMachineCount: ptr.To(int64(workerMachineCount)), LogFolder: filepath.Join(artifactFolder, "clusters", bootstrapClusterProxy.GetName()), ClusterctlVariables: map[string]string{ "CLUSTER_NAME": workloadClusterName, @@ -97,6 +98,7 @@ func controlplaneConditionsSpec(t *testing.T) { e2eutil.GetInterval(e2eConfig, testName, "wait-delete-cluster"), skipCleanup, clusterctlConfigPath, + infrastructureProvider, ) testCancelWatches() diff --git a/e2e/controlplane_remediation_test.go b/e2e/controlplane_remediation_test.go index d14b5ea69..4c67e4522 100644 --- a/e2e/controlplane_remediation_test.go +++ b/e2e/controlplane_remediation_test.go @@ -72,18 +72,16 @@ func controlplaneRemediationSpec(t *testing.T) { require.NoError(t, err) workloadClusterTemplate := clusterctl.ConfigCluster(ctx, clusterctl.ConfigClusterInput{ - ClusterctlConfigPath: clusterctlConfigPath, - KubeconfigPath: bootstrapClusterProxy.GetKubeconfigPath(), - // select cluster templates - Flavor: "kcp-remediation", - + ClusterctlConfigPath: clusterctlConfigPath, + 
KubeconfigPath: bootstrapClusterProxy.GetKubeconfigPath(), + Flavor: "kcp-remediation", Namespace: namespace.Name, ClusterName: clusterName, KubernetesVersion: e2eConfig.MustGetVariable(KubernetesVersion), - ControlPlaneMachineCount: ptr.To[int64](3), - // TODO: make infra provider configurable - InfrastructureProvider: "docker", - LogFolder: filepath.Join(artifactFolder, "clusters", bootstrapClusterProxy.GetName()), + ControlPlaneMachineCount: ptr.To(int64(controlPlaneMachineCount)), + WorkerMachineCount: ptr.To(int64(workerMachineCount)), + InfrastructureProvider: infrastructureProvider, + LogFolder: filepath.Join(artifactFolder, "clusters", bootstrapClusterProxy.GetName()), ClusterctlVariables: map[string]string{ "CLUSTER_NAME": clusterName, "NAMESPACE": namespace.Name, diff --git a/e2e/data/infrastructure-k0smotron/main/bases/cluster-with-hcp.yaml b/e2e/data/infrastructure-k0smotron/main/bases/cluster-with-hcp.yaml new file mode 100644 index 000000000..482f0751c --- /dev/null +++ b/e2e/data/infrastructure-k0smotron/main/bases/cluster-with-hcp.yaml @@ -0,0 +1,48 @@ +# Base cluster template for a hosted control plane: a Cluster backed by a K0smotronControlPlane and a RemoteCluster. +apiVersion: cluster.x-k8s.io/v1beta1 +kind: Cluster +metadata: + name: ${CLUSTER_NAME} + namespace: ${NAMESPACE} +spec: + clusterNetwork: + pods: + cidrBlocks: + - 192.168.0.0/16 + serviceDomain: cluster.local + services: + cidrBlocks: + - 10.128.0.0/12 + controlPlaneRef: + apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + kind: K0smotronControlPlane + name: ${CLUSTER_NAME} + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: RemoteCluster + name: ${CLUSTER_NAME} +--- +apiVersion: controlplane.cluster.x-k8s.io/v1beta1 +kind: K0smotronControlPlane +metadata: + name: ${CLUSTER_NAME} + namespace: ${NAMESPACE} +spec: + replicas: ${CONTROL_PLANE_MACHINE_COUNT} + version: "${K0S_VERSION}" + service: + type: NodePort + + k0sConfig: + apiVersion: k0s.k0sproject.io/v1beta1 + kind: ClusterConfig + spec: + telemetry: + enabled: false +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: RemoteCluster +metadata: + name: ${CLUSTER_NAME} + namespace: ${NAMESPACE} +spec: \ No newline at end of file diff --git a/e2e/data/infrastructure-k0smotron/main/bases/cluster-with-kcp.yaml b/e2e/data/infrastructure-k0smotron/main/bases/cluster-with-kcp.yaml new file mode 100644 index 000000000..ba6bec791 --- /dev/null +++ b/e2e/data/infrastructure-k0smotron/main/bases/cluster-with-kcp.yaml @@ -0,0 +1,73 @@ +apiVersion: controlplane.cluster.x-k8s.io/v1beta1 +kind: K0sControlPlane +metadata: + name: ${CLUSTER_NAME} + namespace: ${NAMESPACE} +spec: + replicas: ${CONTROL_PLANE_MACHINE_COUNT} + version: "${K0S_VERSION}" + updateStrategy: ${UPDATE_STRATEGY} + k0sConfigSpec: + k0s: + apiVersion: k0s.k0sproject.io/v1beta1 + kind: ClusterConfig + metadata: + name: k0s + spec: + api: + extraArgs: + anonymous-auth: "true" + telemetry: + enabled: false + args: + - --enable-worker + - --no-taints + machineTemplate: + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: RemoteMachineTemplate + name: ${CLUSTER_NAME}-cp + namespace: ${NAMESPACE} +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: RemoteCluster +metadata: + name: ${CLUSTER_NAME} + namespace: ${NAMESPACE} +spec: +--- +apiVersion: cluster.x-k8s.io/v1beta1 +kind: Cluster +metadata: + name: ${CLUSTER_NAME} + namespace: ${NAMESPACE} +spec: + clusterNetwork: + pods: + cidrBlocks: + - 192.168.0.0/16 + serviceDomain: cluster.local + services: +
cidrBlocks: + - 10.128.0.0/12 + controlPlaneEndpoint: + host: ${LOAD_BALANCER_ADDRESS} + port: 6443 + controlPlaneRef: + apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + kind: K0sControlPlane + name: ${CLUSTER_NAME} + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: RemoteCluster + name: ${CLUSTER_NAME} +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: RemoteMachineTemplate +metadata: + name: ${CLUSTER_NAME}-cp + namespace: ${NAMESPACE} +spec: + template: + spec: + pool: cp-pool diff --git a/e2e/data/infrastructure-k0smotron/main/bases/md.yaml b/e2e/data/infrastructure-k0smotron/main/bases/md.yaml new file mode 100644 index 000000000..802e0f72c --- /dev/null +++ b/e2e/data/infrastructure-k0smotron/main/bases/md.yaml @@ -0,0 +1,48 @@ +apiVersion: cluster.x-k8s.io/v1beta1 +kind: MachineDeployment +metadata: + name: ${CLUSTER_NAME} + namespace: ${NAMESPACE} +spec: + replicas: ${WORKER_MACHINE_COUNT} + clusterName: ${CLUSTER_NAME} + selector: + matchLabels: + cluster.x-k8s.io/cluster-name: ${CLUSTER_NAME} + pool: worker-pool-1 + template: + metadata: + labels: + cluster.x-k8s.io/cluster-name: ${CLUSTER_NAME} + pool: worker-pool-1 + spec: + clusterName: ${CLUSTER_NAME} + bootstrap: + configRef: + apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 + kind: K0sWorkerConfigTemplate + name: ${CLUSTER_NAME} + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: RemoteMachineTemplate + name: ${CLUSTER_NAME}-worker +--- +apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 +kind: K0sWorkerConfigTemplate +metadata: + name: ${CLUSTER_NAME} + namespace: ${NAMESPACE} +spec: + template: + spec: + version: ${K0S_VERSION} +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: RemoteMachineTemplate +metadata: + name: ${CLUSTER_NAME}-worker + namespace: ${NAMESPACE} +spec: + template: + spec: + pool: worker-pool \ No newline at end of file diff --git a/e2e/data/infrastructure-k0smotron/main/bases/pooled-machines.yaml b/e2e/data/infrastructure-k0smotron/main/bases/pooled-machines.yaml new file mode 100644 index 000000000..5f686967b --- /dev/null +++ b/e2e/data/infrastructure-k0smotron/main/bases/pooled-machines.yaml @@ -0,0 +1,90 @@ +# We create a pool of five machines to be used by the k0smotron infrastructure. +# The e2e tests will create and delete machines from this pool as needed. 
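+# NOTE: The ${ADDRESS_1}..${ADDRESS_5} placeholders below are not set by clusterctl itself;
+# e2e/setup.go (later in this diff) exports each provisioned pool machine's IP into the e2e
+# config as ADDRESS_1..ADDRESS_N (1-based, matching the PooledRemoteMachine names) via
+# e2eConfig.Variables[fmt.Sprintf("ADDRESS_%d", i+1)] = address.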
+ +# +# MACHINE POOL FOR CONTROL PLANE NODES +# +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: PooledRemoteMachine +metadata: + name: ${CLUSTER_NAME}-1 + namespace: ${NAMESPACE} +spec: + pool: cp-pool + machine: + address: ${ADDRESS_1} + port: 22 + user: root + sshKeyRef: + name: ${CLUSTER_NAME}-ssh-key +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: PooledRemoteMachine +metadata: + name: ${CLUSTER_NAME}-2 + namespace: ${NAMESPACE} +spec: + pool: cp-pool + machine: + address: ${ADDRESS_2} + port: 22 + user: root + sshKeyRef: + name: ${CLUSTER_NAME}-ssh-key +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: PooledRemoteMachine +metadata: + name: ${CLUSTER_NAME}-3 + namespace: ${NAMESPACE} +spec: + pool: cp-pool + machine: + address: ${ADDRESS_3} + port: 22 + user: root + sshKeyRef: + name: ${CLUSTER_NAME}-ssh-key +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: PooledRemoteMachine +metadata: + name: ${CLUSTER_NAME}-4 + namespace: ${NAMESPACE} +spec: + pool: cp-pool + machine: + address: ${ADDRESS_4} + port: 22 + user: root + sshKeyRef: + name: ${CLUSTER_NAME}-ssh-key +--- + +# +# MACHINE POOL FOR WORKER NODES +# +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: PooledRemoteMachine +metadata: + name: ${CLUSTER_NAME}-5 + namespace: ${NAMESPACE} +spec: + pool: worker-pool + machine: + address: ${ADDRESS_5} + port: 22 + user: root + sshKeyRef: + name: ${CLUSTER_NAME}-ssh-key +--- + +# SSH KEY SECRET FOR ACCESSING THE MACHINES IN THE POOL +apiVersion: v1 +kind: Secret +metadata: + name: ${CLUSTER_NAME}-ssh-key + namespace: ${NAMESPACE} +data: + value: ${SSH_PRIVATE_KEY_BASE64} +type: Opaque \ No newline at end of file diff --git a/e2e/data/infrastructure-k0smotron/main/cluster-template-ingress/controlplane-with-ingress.yaml b/e2e/data/infrastructure-k0smotron/main/cluster-template-ingress/controlplane-with-ingress.yaml new file mode 100644 index 000000000..e82816ceb --- /dev/null +++ b/e2e/data/infrastructure-k0smotron/main/cluster-template-ingress/controlplane-with-ingress.yaml @@ -0,0 +1,22 @@ +apiVersion: controlplane.cluster.x-k8s.io/v1beta1 +kind: K0smotronControlPlane +metadata: + name: ${CLUSTER_NAME} + namespace: ${NAMESPACE} +spec: + version: v1.34.1-k0s.0 + ingress: + apiHost: kube-api.${KIND_IP}.nip.io + konnectivityHost: konnectivity.${KIND_IP}.nip.io + port: ${HAPROXY_PORT} + annotations: + haproxy.org/ssl-passthrough: "true" + k0sConfig: + apiVersion: k0s.k0sproject.io/v1beta1 + kind: ClusterConfig + spec: + api: + extraArgs: + anonymous-auth: "true" + telemetry: + enabled: false \ No newline at end of file diff --git a/e2e/data/infrastructure-k0smotron/main/cluster-template-ingress/kustomization.yaml b/e2e/data/infrastructure-k0smotron/main/cluster-template-ingress/kustomization.yaml new file mode 100644 index 000000000..d07d1f023 --- /dev/null +++ b/e2e/data/infrastructure-k0smotron/main/cluster-template-ingress/kustomization.yaml @@ -0,0 +1,7 @@ +resources: +- ../bases/cluster-with-hcp.yaml +- ../bases/md.yaml +- ../bases/pooled-machines.yaml + +patches: +- path: controlplane-with-ingress.yaml \ No newline at end of file diff --git a/e2e/data/infrastructure-k0smotron/main/cluster-template-kcp-remediation/kcp-with-signal-listener-for-remediation.yaml b/e2e/data/infrastructure-k0smotron/main/cluster-template-kcp-remediation/kcp-with-signal-listener-for-remediation.yaml new file mode 100644 index 000000000..be9d0413f --- /dev/null +++ 
b/e2e/data/infrastructure-k0smotron/main/cluster-template-kcp-remediation/kcp-with-signal-listener-for-remediation.yaml @@ -0,0 +1,62 @@ +apiVersion: controlplane.cluster.x-k8s.io/v1beta1 +kind: K0sControlPlane +metadata: + name: ${CLUSTER_NAME} + namespace: ${NAMESPACE} +spec: + replicas: ${CONTROL_PLANE_MACHINE_COUNT} + version: "${K0S_VERSION}" + updateStrategy: Recreate + k0sConfigSpec: + args: + - --enable-worker + k0s: + apiVersion: k0s.k0sproject.io/v1beta1 + kind: ClusterConfig + metadata: + name: k0s + spec: + api: + extraArgs: + anonymous-auth: "true" + telemetry: + enabled: false + network: + controlPlaneLoadBalancing: + enabled: false + files: + - path: /wait-signal.sh + content: | + #!/bin/bash + + set -o errexit + set -o pipefail + + echo "Waiting for signal..." + + TOKEN=$1 + SERVER=$2 + NAMESPACE=$3 + + while true; + do + sleep 1s + + signal=$(curl -k -s --header "Authorization: Bearer $TOKEN" $SERVER/api/v1/namespaces/$NAMESPACE/configmaps/mhc-test | jq -r .data.signal?) + echo "signal $signal" + + if [ "$signal" == "pass" ]; then + curl -k -s --header "Authorization: Bearer $TOKEN" -XPATCH -H "Content-Type: application/strategic-merge-patch+json" --data '{"data": {"signal": "ack-pass"}}' $SERVER/api/v1/namespaces/$NAMESPACE/configmaps/mhc-test + exit 0 + fi + done + permissions: "0777" + preStartCommands: + - ./wait-signal.sh "${TOKEN}" "${SERVER}" "${NAMESPACE}" + + machineTemplate: + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: RemoteMachineTemplate + name: ${CLUSTER_NAME}-cp + namespace: ${NAMESPACE} \ No newline at end of file diff --git a/e2e/data/infrastructure-k0smotron/main/cluster-template-kcp-remediation/kustomization.yaml b/e2e/data/infrastructure-k0smotron/main/cluster-template-kcp-remediation/kustomization.yaml new file mode 100644 index 000000000..8e8d37cf8 --- /dev/null +++ b/e2e/data/infrastructure-k0smotron/main/cluster-template-kcp-remediation/kustomization.yaml @@ -0,0 +1,8 @@ +resources: +- ../bases/cluster-with-kcp.yaml +- ../bases/md.yaml +- mhc.yaml +- ../bases/pooled-machines.yaml + +patches: +- path: kcp-with-signal-listener-for-remediation.yaml \ No newline at end of file diff --git a/e2e/data/infrastructure-k0smotron/main/cluster-template-kcp-remediation/mhc.yaml b/e2e/data/infrastructure-k0smotron/main/cluster-template-kcp-remediation/mhc.yaml new file mode 100644 index 000000000..c70f916c4 --- /dev/null +++ b/e2e/data/infrastructure-k0smotron/main/cluster-template-kcp-remediation/mhc.yaml @@ -0,0 +1,17 @@ +apiVersion: cluster.x-k8s.io/v1beta1 +kind: MachineHealthCheck +metadata: + name: ${CLUSTER_NAME}-mhc-0 + namespace: ${NAMESPACE} +spec: + clusterName: ${CLUSTER_NAME} + maxUnhealthy: 100% + nodeStartupTimeout: 60s + selector: + matchLabels: + cluster.x-k8s.io/control-plane: "true" + mhc-test: fail + unhealthyConditions: + - status: "False" + timeout: 10s + type: e2e.remediation.condition \ No newline at end of file diff --git a/e2e/data/infrastructure-k0smotron/main/cluster-template-machinedeployment/kustomization.yaml b/e2e/data/infrastructure-k0smotron/main/cluster-template-machinedeployment/kustomization.yaml new file mode 100644 index 000000000..81fb013e1 --- /dev/null +++ b/e2e/data/infrastructure-k0smotron/main/cluster-template-machinedeployment/kustomization.yaml @@ -0,0 +1,4 @@ +resources: +- ../bases/md.yaml +- ../bases/pooled-machines.yaml +- ../bases/cluster-with-hcp.yaml diff --git a/e2e/data/infrastructure-k0smotron/main/cluster-template-remote-hcp/external-cluster-kubeconfig-reference.yaml 
b/e2e/data/infrastructure-k0smotron/main/cluster-template-remote-hcp/external-cluster-kubeconfig-reference.yaml new file mode 100644 index 000000000..0268c9247 --- /dev/null +++ b/e2e/data/infrastructure-k0smotron/main/cluster-template-remote-hcp/external-cluster-kubeconfig-reference.yaml @@ -0,0 +1,8 @@ +apiVersion: v1 +kind: Secret +metadata: + name: hosting-cluster-kubeconfig + namespace: ${NAMESPACE} +type: Opaque +data: + value: ${HOSTING_CLUSTER_KUBECONFIG} \ No newline at end of file diff --git a/e2e/data/infrastructure-k0smotron/main/cluster-template-remote-hcp/hcp-with-external-cluster-patch.yaml b/e2e/data/infrastructure-k0smotron/main/cluster-template-remote-hcp/hcp-with-external-cluster-patch.yaml new file mode 100644 index 000000000..6f4b19d25 --- /dev/null +++ b/e2e/data/infrastructure-k0smotron/main/cluster-template-remote-hcp/hcp-with-external-cluster-patch.yaml @@ -0,0 +1,21 @@ +apiVersion: controlplane.cluster.x-k8s.io/v1beta1 +kind: K0smotronControlPlane +metadata: + name: ${CLUSTER_NAME} + namespace: ${NAMESPACE} +spec: + kubeconfigRef: + name: hosting-cluster-kubeconfig + namespace: ${NAMESPACE} + key: value + replicas: ${CONTROL_PLANE_MACHINE_COUNT} + version: "${K0S_VERSION}" + service: + type: NodePort + + k0sConfig: + apiVersion: k0s.k0sproject.io/v1beta1 + kind: ClusterConfig + spec: + telemetry: + enabled: false diff --git a/e2e/data/infrastructure-k0smotron/main/cluster-template-remote-hcp/kustomization.yaml b/e2e/data/infrastructure-k0smotron/main/cluster-template-remote-hcp/kustomization.yaml new file mode 100644 index 000000000..5969fb72c --- /dev/null +++ b/e2e/data/infrastructure-k0smotron/main/cluster-template-remote-hcp/kustomization.yaml @@ -0,0 +1,8 @@ +resources: +- ../bases/cluster-with-hcp.yaml +- ../bases/md.yaml +- ../bases/pooled-machines.yaml +- external-cluster-kubeconfig-reference.yaml + +patches: +- path: hcp-with-external-cluster-patch.yaml \ No newline at end of file diff --git a/e2e/data/infrastructure-k0smotron/main/cluster-template-webhook-k0s-not-compatible/invalid-kcp-patch.yaml b/e2e/data/infrastructure-k0smotron/main/cluster-template-webhook-k0s-not-compatible/invalid-kcp-patch.yaml new file mode 100644 index 000000000..1b91ce54b --- /dev/null +++ b/e2e/data/infrastructure-k0smotron/main/cluster-template-webhook-k0s-not-compatible/invalid-kcp-patch.yaml @@ -0,0 +1,38 @@ +apiVersion: controlplane.cluster.x-k8s.io/v1beta1 +kind: K0sControlPlane +metadata: + name: ${CLUSTER_NAME} + namespace: ${NAMESPACE} +spec: + replicas: ${CONTROL_PLANE_MACHINE_COUNT} + version: v1.31.1+k0s.0 + updateStrategy: ${UPDATE_STRATEGY} + k0sConfigSpec: + args: + - --enable-worker + k0s: + apiVersion: k0s.k0sproject.io/v1beta1 + kind: ClusterConfig + metadata: + name: k0s + spec: + api: + extraArgs: + anonymous-auth: "true" + telemetry: + enabled: false + network: + controlPlaneLoadBalancing: + enabled: false + files: + - path: /tmp/test-file-secret + contentFrom: + secretRef: + name: test-file-secret + key: value + machineTemplate: + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: RemoteMachineTemplate + name: ${CLUSTER_NAME}-cp + namespace: ${NAMESPACE} \ No newline at end of file diff --git a/e2e/data/infrastructure-k0smotron/main/cluster-template-webhook-k0s-not-compatible/kustomization.yaml b/e2e/data/infrastructure-k0smotron/main/cluster-template-webhook-k0s-not-compatible/kustomization.yaml new file mode 100644 index 000000000..0eec6a5f1 --- /dev/null +++ 
b/e2e/data/infrastructure-k0smotron/main/cluster-template-webhook-k0s-not-compatible/kustomization.yaml @@ -0,0 +1,7 @@ +resources: +- ../bases/cluster-with-kcp.yaml +- ../bases/md.yaml +- ../bases/pooled-machines.yaml + +patches: +- path: invalid-kcp-patch.yaml \ No newline at end of file diff --git a/e2e/data/infrastructure-k0smotron/main/cluster-template-webhook-recreate-in-single-mode/invalid-kcp-patch.yaml b/e2e/data/infrastructure-k0smotron/main/cluster-template-webhook-recreate-in-single-mode/invalid-kcp-patch.yaml new file mode 100644 index 000000000..8e5540f88 --- /dev/null +++ b/e2e/data/infrastructure-k0smotron/main/cluster-template-webhook-recreate-in-single-mode/invalid-kcp-patch.yaml @@ -0,0 +1,38 @@ +apiVersion: controlplane.cluster.x-k8s.io/v1beta1 +kind: K0sControlPlane +metadata: + name: ${CLUSTER_NAME} + namespace: ${NAMESPACE} +spec: + replicas: ${CONTROL_PLANE_MACHINE_COUNT} + version: v1.30.1+k0s.0 + updateStrategy: Recreate + k0sConfigSpec: + args: + - --single + k0s: + apiVersion: k0s.k0sproject.io/v1beta1 + kind: ClusterConfig + metadata: + name: k0s + spec: + api: + extraArgs: + anonymous-auth: "true" + telemetry: + enabled: false + network: + controlPlaneLoadBalancing: + enabled: false + files: + - path: /tmp/test-file-secret + contentFrom: + secretRef: + name: test-file-secret + key: value + machineTemplate: + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: RemoteMachineTemplate + name: ${CLUSTER_NAME}-cp + namespace: ${NAMESPACE} \ No newline at end of file diff --git a/e2e/data/infrastructure-k0smotron/main/cluster-template-webhook-recreate-in-single-mode/kustomization.yaml b/e2e/data/infrastructure-k0smotron/main/cluster-template-webhook-recreate-in-single-mode/kustomization.yaml new file mode 100644 index 000000000..0eec6a5f1 --- /dev/null +++ b/e2e/data/infrastructure-k0smotron/main/cluster-template-webhook-recreate-in-single-mode/kustomization.yaml @@ -0,0 +1,7 @@ +resources: +- ../bases/cluster-with-kcp.yaml +- ../bases/md.yaml +- ../bases/pooled-machines.yaml + +patches: +- path: invalid-kcp-patch.yaml \ No newline at end of file diff --git a/e2e/data/infrastructure-k0smotron/main/cluster-template/kustomization.yaml b/e2e/data/infrastructure-k0smotron/main/cluster-template/kustomization.yaml new file mode 100644 index 000000000..f91858d5a --- /dev/null +++ b/e2e/data/infrastructure-k0smotron/main/cluster-template/kustomization.yaml @@ -0,0 +1,4 @@ +resources: +- ../bases/cluster-with-kcp.yaml +- ../bases/md.yaml +- ../bases/pooled-machines.yaml \ No newline at end of file diff --git a/e2e/ignition_provisioning_test.go b/e2e/ignition_provisioning_test.go index 39fd68635..475fafe05 100644 --- a/e2e/ignition_provisioning_test.go +++ b/e2e/ignition_provisioning_test.go @@ -56,15 +56,17 @@ func ignitionProvisioningSpec(t *testing.T) { } workloadClusterTemplate := clusterctl.ConfigCluster(ctx, clusterctl.ConfigClusterInput{ - ClusterctlConfigPath: clusterctlConfigPath, - KubeconfigPath: bootstrapClusterProxy.GetKubeconfigPath(), - Flavor: "ignition", - + ClusterctlConfigPath: clusterctlConfigPath, + KubeconfigPath: bootstrapClusterProxy.GetKubeconfigPath(), + Flavor: "ignition", Namespace: namespace.Name, ClusterName: clusterName, KubernetesVersion: e2eConfig.MustGetVariable(KubernetesVersion), - ControlPlaneMachineCount: ptr.To[int64](3), - // CAPD doesn't support ignition, so we hardcode AWS as infrastructure provider + ControlPlaneMachineCount: ptr.To(int64(controlPlaneMachineCount)), + WorkerMachineCount: 
ptr.To(int64(workerMachineCount)), + // TODO: Infra is hardcoded to AWS at the moment because no other configured provider supports + // Ignition provisioning. Once the k0smotron infra provider implements AWS as a provisioning backend, this + // should be changed to use the e2e-configured infra provider (CAPD is excluded since it does not support Ignition). InfrastructureProvider: "aws", LogFolder: filepath.Join(artifactFolder, "clusters", bootstrapClusterProxy.GetName()), ClusterctlVariables: map[string]string{ @@ -101,6 +103,7 @@ func ignitionProvisioningSpec(t *testing.T) { util.GetInterval(e2eConfig, testName, "wait-delete-cluster"), skipCleanup, clusterctlConfigPath, + infrastructureProvider, ) }() diff --git a/e2e/ingress_test.go b/e2e/ingress_test.go index cd9f5e232..25e5e5997 100644 --- a/e2e/ingress_test.go +++ b/e2e/ingress_test.go @@ -23,11 +23,12 @@ import ( "fmt" "os/exec" "path/filepath" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - "sigs.k8s.io/controller-runtime/pkg/client" "testing" "time" + clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + "sigs.k8s.io/controller-runtime/pkg/client" + e2eutil "github.com/k0sproject/k0smotron/e2e/util" "github.com/k0sproject/k0smotron/internal/util" "github.com/stretchr/testify/require" @@ -120,6 +121,7 @@ func ingressSupportSpec(t *testing.T) { e2eutil.GetInterval(e2eConfig, testName, "wait-delete-cluster"), skipCleanup, clusterctlConfigPath, + infrastructureProvider, ) testCancelWatches() diff --git a/e2e/k0smotron_upgrade_test.go b/e2e/k0smotron_upgrade_test.go index 7c581002c..a3e3c51b6 100644 --- a/e2e/k0smotron_upgrade_test.go +++ b/e2e/k0smotron_upgrade_test.go @@ -83,10 +83,18 @@ func k0smotronUpgradeSpec(t *testing.T) { fmt.Println("Turning the new cluster into a management cluster with older versions of providers") + infraProviders := e2eConfig.InfrastructureProviders() + for i, p := range infraProviders { + if p == "k0sproject-k0smotron" { + infraProviders[i] = k0smotronProvider[0] + break + } + } + err = mothership.InitAndWatchControllerLogs(watchesCtx, clusterctl.InitManagementClusterAndWatchControllerLogsInput{ ClusterProxy: managementClusterProxy, ClusterctlConfigPath: clusterctlConfigPath, - InfrastructureProviders: e2eConfig.InfrastructureProviders(), + InfrastructureProviders: infraProviders, DisableMetricsCollection: true, BootstrapProviders: k0smotronProvider, ControlPlaneProviders: k0smotronProvider, @@ -96,7 +104,7 @@ func k0smotronUpgradeSpec(t *testing.T) { fmt.Println("THE MANAGEMENT CLUSTER WITH THE OLDER VERSION OF K0SMOTRON PROVIDERS IS UP&RUNNING!") - fmt.Println(fmt.Sprintf("Creating a namespace for hosting the %s test workload cluster", testName)) + fmt.Printf("Creating a namespace for hosting the %s test workload cluster\n", testName) testNamespace, testCancelWatches := framework.CreateNamespaceAndWatchEvents(ctx, framework.CreateNamespaceAndWatchEventsInput{ Creator: managementClusterProxy.GetClient(), @@ -115,16 +123,14 @@ func k0smotronUpgradeSpec(t *testing.T) { ClusterctlConfigPath: clusterctlConfigPath, KubeconfigPath: managementClusterProxy.GetKubeconfigPath(), // no flavor specified, so it will use the default one "cluster-template" - Flavor: "", - - Namespace: workloadClusterNamespace, - ClusterName: workloadClusterName, - KubernetesVersion: e2eConfig.MustGetVariable(KubernetesVersion), - // TODO: make replicas value configurable - ControlPlaneMachineCount: ptr.To[int64](3), - // TODO: make infra provider configurable - InfrastructureProvider: "docker", - LogFolder: filepath.Join(artifactFolder, "clusters",
managementClusterProxy.GetName()), + Flavor: "", + Namespace: workloadClusterNamespace, + ClusterName: workloadClusterName, + KubernetesVersion: e2eConfig.MustGetVariable(KubernetesVersion), + ControlPlaneMachineCount: ptr.To(int64(controlPlaneMachineCount)), + WorkerMachineCount: ptr.To(int64(workerMachineCount)), + InfrastructureProvider: infrastructureProvider, + LogFolder: filepath.Join(artifactFolder, "clusters", managementClusterProxy.GetName()), ClusterctlVariables: map[string]string{ "CLUSTER_NAME": workloadClusterName, "NAMESPACE": workloadClusterNamespace, @@ -133,8 +139,12 @@ func k0smotronUpgradeSpec(t *testing.T) { }) require.NotNil(t, workloadClusterTemplate) + fmt.Println(string(workloadClusterTemplate)) + require.Eventually(t, func() bool { - return managementClusterProxy.CreateOrUpdate(ctx, workloadClusterTemplate) == nil + err := managementClusterProxy.CreateOrUpdate(ctx, workloadClusterTemplate) + fmt.Println(err) + return err == nil }, 10*time.Second, 1*time.Second, "Failed to apply the cluster template") cluster, err := e2eutil.DiscoveryAndWaitForCluster(ctx, capiframework.DiscoveryAndWaitForClusterInput{ @@ -156,6 +166,7 @@ func k0smotronUpgradeSpec(t *testing.T) { e2eutil.GetInterval(e2eConfig, testName, "wait-delete-cluster"), skipCleanup, clusterctlConfigPath, + infrastructureProvider, ) testCancelWatches() @@ -201,14 +212,15 @@ func k0smotronUpgradeSpec(t *testing.T) { latestK0smotronStableMinor, _ := getStableReleaseOfMinor(context.Background(), minor) k0smotronVersion := []string{fmt.Sprintf("k0sproject-k0smotron:v%s", latestK0smotronStableMinor)} - fmt.Println(fmt.Sprintf("Upgrading the management cluster to k0smotron %s", latestK0smotronStableMinor)) + fmt.Printf("Upgrading the management cluster to k0smotron %s\n", latestK0smotronStableMinor) mothership.UpgradeManagementClusterAndWait(ctx, clusterctl.UpgradeManagementClusterAndWaitInput{ - ClusterctlConfigPath: clusterctlConfigPath, - ClusterProxy: managementClusterProxy, - BootstrapProviders: k0smotronVersion, - ControlPlaneProviders: k0smotronVersion, - LogFolder: managementClusterLogFolder, + ClusterctlConfigPath: clusterctlConfigPath, + ClusterProxy: managementClusterProxy, + BootstrapProviders: k0smotronVersion, + ControlPlaneProviders: k0smotronVersion, + InfrastructureProviders: infraProviders, + LogFolder: managementClusterLogFolder, }, e2eutil.GetInterval(e2eConfig, "bootstrap", "wait-deployment-available")) controlPlane, err := e2eutil.DiscoveryAndWaitForControlPlaneInitialized(ctx, capiframework.DiscoveryAndWaitForControlPlaneInitializedInput{ @@ -230,7 +242,7 @@ func k0smotronUpgradeSpec(t *testing.T) { require.True(t, validateMachineRollout(preUpgradeMachineList, postUpgradeMachineList), "The machines in the workload cluster have been rolled out unexpectedly") - fmt.Println(fmt.Sprintf("THE MANAGEMENT CLUSTER WITH '%s' VERSION OF K0SMOTRON PROVIDERS WORKS!", latestK0smotronStableMinor)) + fmt.Printf("THE MANAGEMENT CLUSTER WITH '%s' VERSION OF K0SMOTRON PROVIDERS WORKS!\n", latestK0smotronStableMinor) } fmt.Println("Upgrading the management cluster to development version of k0smotron") diff --git a/e2e/machinedeployment_test.go b/e2e/machinedeployment_test.go index acf47caa1..376bd142b 100644 --- a/e2e/machinedeployment_test.go +++ b/e2e/machinedeployment_test.go @@ -51,18 +51,16 @@ func TestMachinedeployment(t *testing.T) { clusterName := fmt.Sprintf("%s-%s", testName, capiutil.RandomString(6)) workloadClusterTemplate := clusterctl.ConfigCluster(ctx, clusterctl.ConfigClusterInput{ - 
ClusterctlConfigPath: clusterctlConfigPath, - KubeconfigPath: bootstrapClusterProxy.GetKubeconfigPath(), - // select cluster templates - Flavor: "machinedeployment", - + ClusterctlConfigPath: clusterctlConfigPath, + KubeconfigPath: bootstrapClusterProxy.GetKubeconfigPath(), + Flavor: "machinedeployment", Namespace: namespace.Name, ClusterName: clusterName, KubernetesVersion: "v1.32.2", - ControlPlaneMachineCount: ptr.To[int64](1), - // TODO: make infra provider configurable - InfrastructureProvider: "docker", - LogFolder: filepath.Join(artifactFolder, "clusters", bootstrapClusterProxy.GetName()), + ControlPlaneMachineCount: ptr.To(int64(controlPlaneMachineCount)), + WorkerMachineCount: ptr.To(int64(workerMachineCount)), + InfrastructureProvider: infrastructureProvider, + LogFolder: filepath.Join(artifactFolder, "clusters", bootstrapClusterProxy.GetName()), ClusterctlVariables: map[string]string{ "CLUSTER_NAME": clusterName, "NAMESPACE": namespace.Name, @@ -93,6 +91,7 @@ func TestMachinedeployment(t *testing.T) { util.GetInterval(e2eConfig, testName, "wait-delete-cluster"), skipCleanup, clusterctlConfigPath, + infrastructureProvider, ) }() diff --git a/e2e/remote_hcp_test.go b/e2e/remote_hcp_test.go index 2fa700db0..a010cf6ef 100644 --- a/e2e/remote_hcp_test.go +++ b/e2e/remote_hcp_test.go @@ -84,18 +84,16 @@ func remoteHCPSpec(t *testing.T) { clusterName := fmt.Sprintf("%s-%s", testName, capiutil.RandomString(6)) workloadClusterTemplate := clusterctl.ConfigCluster(ctx, clusterctl.ConfigClusterInput{ - ClusterctlConfigPath: clusterctlConfigPath, - KubeconfigPath: bootstrapClusterProxy.GetKubeconfigPath(), - // select cluster templates - Flavor: "remote-hcp", - + ClusterctlConfigPath: clusterctlConfigPath, + KubeconfigPath: bootstrapClusterProxy.GetKubeconfigPath(), + Flavor: "remote-hcp", Namespace: namespace.Name, ClusterName: clusterName, KubernetesVersion: e2eConfig.MustGetVariable(KubernetesVersion), - ControlPlaneMachineCount: ptr.To[int64](3), - // TODO: make infra provider configurable - InfrastructureProvider: "docker", - LogFolder: filepath.Join(artifactFolder, "clusters", bootstrapClusterProxy.GetName()), + ControlPlaneMachineCount: ptr.To(int64(controlPlaneMachineCount)), + WorkerMachineCount: ptr.To(int64(workerMachineCount)), + InfrastructureProvider: infrastructureProvider, + LogFolder: filepath.Join(artifactFolder, "clusters", bootstrapClusterProxy.GetName()), ClusterctlVariables: map[string]string{ "CLUSTER_NAME": clusterName, "NAMESPACE": namespace.Name, @@ -127,6 +125,7 @@ func remoteHCPSpec(t *testing.T) { util.GetInterval(e2eConfig, testName, "wait-delete-cluster"), skipCleanup, clusterctlConfigPath, + infrastructureProvider, ) }() diff --git a/e2e/setup.go b/e2e/setup.go index 17c183801..51c05dc84 100644 --- a/e2e/setup.go +++ b/e2e/setup.go @@ -20,6 +20,11 @@ package e2e import ( "context" + "crypto/rand" + "crypto/rsa" + "crypto/x509" + "encoding/base64" + "encoding/pem" "errors" "flag" "fmt" @@ -28,6 +33,7 @@ import ( "testing" "github.com/onsi/gomega" + "golang.org/x/crypto/ssh" "k8s.io/apimachinery/pkg/runtime" "k8s.io/klog/v2" @@ -36,6 +42,9 @@ import ( cpv1beta1 "github.com/k0sproject/k0smotron/api/controlplane/v1beta1" "github.com/k0sproject/k0smotron/e2e/mothership" "github.com/k0sproject/k0smotron/e2e/util" + "github.com/k0sproject/k0smotron/e2e/util/poolprovisioner" + dockerprovisioner "github.com/k0sproject/k0smotron/e2e/util/poolprovisioner/docker" + clusterctlv1 "sigs.k8s.io/cluster-api/cmd/clusterctl/api/v1alpha3" "sigs.k8s.io/cluster-api/test/framework" 
capiframework "sigs.k8s.io/cluster-api/test/framework" "sigs.k8s.io/cluster-api/test/framework/bootstrap" @@ -45,14 +54,16 @@ import ( // Test suite constants for e2e config variables. const ( - KubernetesVersion = "KUBERNETES_VERSION" - KubernetesVersionManagement = "KUBERNETES_VERSION_MANAGEMENT" - KubernetesVersionFirstUpgradeTo = "KUBERNETES_VERSION_FIRST_UPGRADE_TO" - KubernetesVersionSecondUpgradeTo = "KUBERNETES_VERSION_SECOND_UPGRADE_TO" - ControlPlaneMachineCount = "CONTROL_PLANE_MACHINE_COUNT" - IPFamily = "IP_FAMILY" - SSHPublicKey = "SSH_PUBLIC_KEY" - SSHKeyName = "SSH_KEY_NAME" + KubernetesVersion = "KUBERNETES_VERSION" + KubernetesVersionManagement = "KUBERNETES_VERSION_MANAGEMENT" + K0sVersion = "K0S_VERSION" + K0sVersionFirstUpgradeTo = "K0S_VERSION_FIRST_UPGRADE_TO" + K0sVersionSecondUpgradeTo = "K0S_VERSION_SECOND_UPGRADE_TO" + ControlPlaneMachineCount = "CONTROL_PLANE_MACHINE_COUNT" + IPFamily = "IP_FAMILY" + SSHPublicKey = "SSH_PUBLIC_KEY" + SSHKeyName = "SSH_KEY_NAME" + PoolProvisioner = "POOL_PROVISIONER" ) var ( @@ -89,6 +100,15 @@ var ( // managementClusterProxy allows to interact with the management cluster to be used for the e2e tests. bootstrapClusterProxy capiframework.ClusterProxy + + // controlPlaneMachineCount is the number of control plane machines to create in the workload clusters. + controlPlaneMachineCount int + + // workerMachineCount is the number of worker machines to create in the workload clusters. + workerMachineCount int + + // infrastructureProvider is the infrastructure provider to use for the tests. Default is k0smotron. + infrastructureProvider string ) func init() { @@ -97,6 +117,9 @@ func init() { flag.BoolVar(&skipCleanup, "skip-resource-cleanup", false, "if true, the resource cleanup after tests will be skipped") flag.StringVar(&artifactFolder, "artifacts-folder", "", "folder where e2e test artifact should be stored") flag.BoolVar(&useExistingCluster, "use-existing-cluster", false, "if true, the test uses the current cluster instead of creating a new one (default discovery rules apply)") + flag.IntVar(&controlPlaneMachineCount, "control-plane-machine-count", 3, "number of control plane machines") + flag.IntVar(&workerMachineCount, "worker-machine-count", 1, "number of worker machines") + flag.StringVar(&infrastructureProvider, "infrastructure-provider", "k0sproject-k0smotron", "infrastructure provider to use for the tests") // On the k0smotron side we avoid using Gomega for assertions but since we want to use the // cluster-api framework as much as possible, the framework assertions require registering @@ -115,7 +138,7 @@ func setupAndRun(t *testing.T, test func(t *testing.T)) { tearDown(bootstrapClusterProvider, bootstrapClusterProxy) } }() - err := setupMothership() + err := setup() if err != nil { panic(err) } @@ -123,13 +146,91 @@ func setupAndRun(t *testing.T, test func(t *testing.T)) { test(t) } -func setupMothership() error { +func setupPoolMachinesConfigEnv(ctx context.Context, replicas int, nodeVersion string, e2eConfig *clusterctl.E2EConfig) error { + switch os.Getenv(PoolProvisioner) { + case "docker", "": + poolprovisioner.PoolProvisioner = &dockerprovisioner.Provisioner{} + // TODO: add AWS as provisioner + default: + return fmt.Errorf("unknown pool provisioner: %s", os.Getenv(PoolProvisioner)) + } + + // Create keypair to allow SSH for k0s provisioning by the infrastructure controller. 
+ privateKey, err := rsa.GenerateKey(rand.Reader, 2048) + if err != nil { + return fmt.Errorf("generate private key: %v", err) + } + privDER := x509.MarshalPKCS1PrivateKey(privateKey) + privBlock := &pem.Block{ + Type: "RSA PRIVATE KEY", + Bytes: privDER, + } + privPEM := pem.EncodeToMemory(privBlock) + privB64 := base64.StdEncoding.EncodeToString(privPEM) + // Format used in the cluster templates + e2eConfig.Variables["SSH_PRIVATE_KEY_BASE64"] = privB64 + // Marshal public key for authorized_keys + pub, err := ssh.NewPublicKey(&privateKey.PublicKey) + if err != nil { + return fmt.Errorf("marshal public key: %v", err) + } + pubAuthorized := ssh.MarshalAuthorizedKey(pub) + + err = poolprovisioner.PoolProvisioner.Provision(ctx, replicas, nodeVersion, pubAuthorized) + if err != nil { + return fmt.Errorf("provision pool machines: %w", err) + } + + // The load balancer is created only by the Docker provisioner at the moment, so check the provisioner type. + if _, ok := poolprovisioner.PoolProvisioner.(*dockerprovisioner.Provisioner); ok { + e2eConfig.Variables["LOAD_BALANCER_ADDRESS"] = dockerprovisioner.GetLoadBalancerIPAddress() + } + + for i, address := range poolprovisioner.PoolProvisioner.GetRemoteMachinesAddresses() { + // Format: ADDRESS_1, ADDRESS_2, ... is used in the cluster templates + e2eConfig.Variables[fmt.Sprintf("ADDRESS_%d", i+1)] = address + } + + return nil +} + +func setup() error { var err error e2eConfig, err = loadE2EConfig(ctx, configPath) if err != nil { return fmt.Errorf("failed to load e2e config: %w", err) } + // The e2e test configuration declares every infrastructure provider in the same file, + // so we drop the providers this run does not use and avoid creating local clusterctl repositories for them. + usedProviders := []clusterctl.ProviderConfig{} + for _, provider := range e2eConfig.Providers { + if provider.Type == string(clusterctlv1.InfrastructureProviderType) && provider.Name != infrastructureProvider { + continue + } + usedProviders = append(usedProviders, provider) + } + e2eConfig.Providers = usedProviders + + // If the k0smotron provider is used, we need to create the virtual machines beforehand. + if hasK0smotronProvider(e2eConfig) { + // We provision one extra machine beyond the requested counts so that a rolling upgrade + // always has a spare node behind the load balancer and avoids downtime. + replicas := controlPlaneMachineCount + workerMachineCount + 1 + // We create the pool machines and set the ADDRESS_X environment variables corresponding to + // each machine IP address.
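+ // For the default counts (3 control plane machines, 1 worker) this provisions 5 machines + // and exposes their IPs to the templates as ADDRESS_1 .. ADDRESS_5.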
+ err := setupPoolMachinesConfigEnv(ctx, replicas, e2eConfig.MustGetVariable(KubernetesVersion), e2eConfig) + if err != nil { + if poolprovisioner.PoolProvisioner != nil { + cleanupErr := poolprovisioner.PoolProvisioner.Clean(ctx) + if cleanupErr != nil { + klog.Errorf("failed to clean up pool machines after setup failure: %v", cleanupErr) + } + } + return fmt.Errorf("failed to setup pool machines: %w", err) + } + } + if clusterctlConfig == "" { clusterctlConfigPath = clusterctl.CreateRepository(ctx, clusterctl.CreateRepositoryInput{ E2EConfig: e2eConfig, @@ -162,7 +263,19 @@ func setupMothership() error { fmt.Println("Using an existing bootstrap cluster") } - bootstrapClusterProxy = capiframework.NewClusterProxy("bootstrap", kubeconfigPath, scheme, framework.WithMachineLogCollector(framework.DockerLogCollector{})) + var opts []capiframework.Option + switch infrastructureProvider { + case "docker": + opts = append(opts, framework.WithMachineLogCollector(framework.DockerLogCollector{})) + case "k0sproject-k0smotron": + // At the moment only the docker provisioner is supported for k0smotron, so we can safely cast here. + dockerProvisioner := poolprovisioner.PoolProvisioner.(*dockerprovisioner.Provisioner) + opts = append(opts, framework.WithMachineLogCollector(dockerprovisioner.RemoteMachineLogCollector{ + Provisioner: dockerProvisioner, + })) + } + + bootstrapClusterProxy = capiframework.NewClusterProxy("bootstrap", kubeconfigPath, scheme, opts...) if bootstrapClusterProxy == nil { return errors.New("failed to get a management cluster proxy") } @@ -183,6 +296,15 @@ func setupMothership() error { return nil } +func hasK0smotronProvider(c *clusterctl.E2EConfig) bool { + for _, i := range c.InfrastructureProviders() { + if i == "k0sproject-k0smotron" { + return true + } + } + return false +} + func tearDown(bootstrapClusterProvider bootstrap.ClusterProvider, bootstrapClusterProxy framework.ClusterProxy) { cancelWatches() if bootstrapClusterProxy != nil { diff --git a/e2e/util/controlplane.go b/e2e/util/controlplane.go index eb1a64482..a3ead0881 100644 --- a/e2e/util/controlplane.go +++ b/e2e/util/controlplane.go @@ -21,20 +21,26 @@ package util import ( "context" "fmt" + "net/url" "strings" "time" + "k8s.io/client-go/tools/clientcmd" "k8s.io/klog/v2" cpv1beta1 "github.com/k0sproject/k0smotron/api/controlplane/v1beta1" + dockerprovisioner "github.com/k0sproject/k0smotron/e2e/util/poolprovisioner/docker" "github.com/pkg/errors" appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/util/wait" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" capiframework "sigs.k8s.io/cluster-api/test/framework" + "sigs.k8s.io/cluster-api/test/infrastructure/container" "sigs.k8s.io/cluster-api/util/patch" crclient "sigs.k8s.io/controller-runtime/pkg/client" ) @@ -109,9 +115,27 @@ func UpgradeControlPlaneAndWaitForReadyUpgrade(ctx context.Context, input Upgrad return err } + isK0smotronInfrastructure, err := isK0smotronInfrastructure(ctx, IsK0smotronInfrastructureInput{ + Getter: input.ClusterProxy.GetClient(), + Cluster: input.Cluster, + }) + if err != nil { + return err + } + + var workloadClient crclient.Client + if isK0smotronInfrastructure { + c, err := getLocalWorkloadClient(ctx, input.ClusterProxy, input.Cluster.Namespace, input.Cluster.Name) + if err != nil { + return err + } + workloadClient = c + } else { + workloadCluster := 
input.ClusterProxy.GetWorkloadCluster(ctx, input.Cluster.Namespace, input.Cluster.Name) + workloadClient = workloadCluster.GetClient() + } + fmt.Println("Waiting for kube-proxy to have the upgraded kubernetes version") - workloadCluster := input.ClusterProxy.GetWorkloadCluster(ctx, input.Cluster.Namespace, input.Cluster.Name) - workloadClient := workloadCluster.GetClient() return WaitForKubeProxyUpgrade(ctx, WaitForKubeProxyUpgradeInput{ Getter: workloadClient, KubernetesVersion: input.KubernetesUpgradeVersion, @@ -136,7 +160,7 @@ func DiscoveryAndWaitForControlPlaneInitialized(ctx context.Context, input capif return nil, fmt.Errorf("couldn't get the control plane for the cluster %s: %w", klog.KObj(input.Cluster), err) } - fmt.Printf("Waiting for the first control plane machine managed by %s to be provisioned", klog.KObj(controlPlane)) + fmt.Printf("Waiting for the first control plane machine managed by %s to be provisioned\n", klog.KObj(controlPlane)) err = WaitForOneK0sControlPlaneMachineToExist(ctx, WaitForOneK0sControlPlaneMachineToExistInput{ Lister: input.Lister, Cluster: input.Cluster, @@ -335,3 +359,91 @@ func k0smotronControlPlaneExists(ctx context.Context, input K0smotronControlPlan return false, nil } + +type IsK0smotronInfrastructureInput struct { + Getter capiframework.Getter + Cluster *clusterv1.Cluster +} + +func isK0smotronInfrastructure(ctx context.Context, input IsK0smotronInfrastructureInput) (bool, error) { + clusterInfra := &unstructured.Unstructured{} + clusterInfra.SetAPIVersion("infrastructure.cluster.x-k8s.io/v1beta1") + clusterInfra.SetKind("RemoteCluster") + clusterKey := crclient.ObjectKey{ + Name: input.Cluster.Name, + Namespace: input.Cluster.Namespace, + } + + err := input.Getter.Get(ctx, clusterKey, clusterInfra) + if err != nil { + if strings.Contains(err.Error(), "no matches for kind \"RemoteCluster\"") { + return false, nil + } + return false, err + } + + return true, nil +} + +// getLocalWorkloadClient retrieves the workload cluster client for k0smotron infrastructure clusters, where the control plane URL needs to be modified +// to point to the API server port that the load balancer publishes on the local host. +func getLocalWorkloadClient(ctx context.Context, clusterProxy capiframework.ClusterProxy, namespace, name string) (crclient.Client, error) { + cl := clusterProxy.GetClient() + + secret := &corev1.Secret{} + key := crclient.ObjectKey{ + Name: fmt.Sprintf("%s-kubeconfig", name), + Namespace: namespace, + } + err := cl.Get(ctx, key, secret) + if err != nil { + return nil, fmt.Errorf("failed to get secret %s: %w", key, err) + } + + config, err := clientcmd.Load(secret.Data["value"]) + if err != nil { + return nil, fmt.Errorf("failed to load kubeconfig from secret %s: %w", key, err) + } + + currentCluster := config.Contexts[config.CurrentContext].Cluster + + containerRuntime, err := container.NewDockerClient() + if err != nil { + return nil, fmt.Errorf("failed to create docker client: %w", err) + } + ctx = container.RuntimeInto(ctx, containerRuntime) + loadBalancerName := dockerprovisioner.GetLoadBalancerName() + + // Check if the container exists locally.
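+ // The haproxy container publishes 6443 on a random host port; the kubeconfig's server URL is + // rewritten below to 127.0.0.1:<host port> so the test process can reach the API server directly.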
+ filters := container.FilterBuilder{} + filters.AddKeyValue("name", loadBalancerName) + containers, err := containerRuntime.ListContainers(ctx, filters) + if err != nil { + return nil, fmt.Errorf("failed to list containers: %w", err) + } + if len(containers) == 0 { + return nil, fmt.Errorf("container %s not found", loadBalancerName) + } + port, err := containerRuntime.GetHostPort(ctx, loadBalancerName, "6443/tcp") + if err != nil { + return nil, fmt.Errorf("failed to get load balancer port: %w", err) + } + + controlPlaneURL := &url.URL{ + Scheme: "https", + Host: "127.0.0.1:" + port, + } + config.Clusters[currentCluster].Server = controlPlaneURL.String() + + // now create the client + restConfig, err := clientcmd.NewDefaultClientConfig(*config, &clientcmd.ConfigOverrides{}).ClientConfig() + if err != nil { + return nil, fmt.Errorf("failed to create rest config from modified kubeconfig: %w", err) + } + + workloadClient, err := crclient.New(restConfig, crclient.Options{}) + if err != nil { + return nil, fmt.Errorf("failed to create workload client from modified rest config: %w", err) + } + return workloadClient, nil +} diff --git a/e2e/util/dump.go b/e2e/util/dump.go index a10add578..7c0334553 100644 --- a/e2e/util/dump.go +++ b/e2e/util/dump.go @@ -23,6 +23,7 @@ import ( "fmt" "path/filepath" + "github.com/k0sproject/k0smotron/e2e/util/poolprovisioner" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime/schema" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" @@ -31,7 +32,7 @@ import ( ) // DumpSpecResourcesAndCleanup dumps all the resources in the spec namespace and cleans up the spec namespace. -func DumpSpecResourcesAndCleanup(ctx context.Context, specName string, clusterProxy capiframework.ClusterProxy, artifactFolder string, namespace *corev1.Namespace, cancelWatches context.CancelFunc, cluster *clusterv1.Cluster, interval Interval, skipCleanup bool, clusterctlConfigPath string) { +func DumpSpecResourcesAndCleanup(ctx context.Context, specName string, clusterProxy capiframework.ClusterProxy, artifactFolder string, namespace *corev1.Namespace, cancelWatches context.CancelFunc, cluster *clusterv1.Cluster, interval Interval, skipCleanup bool, clusterctlConfigPath string, infraProvider string) { // Dump all the resources in the spec namespace and the workload cluster. dumpAllResourcesAndLogs(ctx, clusterProxy, artifactFolder, namespace, cluster, clusterctlConfigPath) @@ -49,6 +50,13 @@ func DumpSpecResourcesAndCleanup(ctx context.Context, specName string, clusterPr Deleter: clusterProxy.GetClient(), Name: namespace.Name, }) + + if infraProvider == "k0sproject-k0smotron" { + err := poolprovisioner.PoolProvisioner.Clean(ctx) + if err != nil { + fmt.Printf("Error cleaning up pool provisioner: %v\n", err) + } + } } cancelWatches() } @@ -56,6 +64,7 @@ func DumpSpecResourcesAndCleanup(ctx context.Context, specName string, clusterPr // dumpAllResourcesAndLogs dumps all the resources in the spec namespace and the workload cluster. func dumpAllResourcesAndLogs(ctx context.Context, clusterProxy capiframework.ClusterProxy, artifactFolder string, namespace *corev1.Namespace, cluster *clusterv1.Cluster, clusterctlConfigPath string) { // Dump all the logs from the workload cluster. + // clusterProxy.CollectWorkloadClusterLogs(ctx, cluster.Namespace, cluster.Name, filepath.Join(artifactFolder, "clusters", cluster.Name)) // Dump all Cluster API related resources to artifacts. 
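The next file, loadbalancer.go, fronts the pool machines with an haproxy container whose config template hardcodes five backend servers, matching the default pool of 3 control plane machines + 1 worker + 1 spare. As an illustration only (not part of this change, and assuming "fmt" and "strings" are imported), the backend section could instead be rendered from the actual address list so the config follows the replica count:

// renderBackend builds the haproxy backend section for an arbitrary number of pool machines.
func renderBackend(addrs []string) string {
	var b strings.Builder
	b.WriteString("backend remote_nodes\n  mode tcp\n  balance roundrobin\n  option tcp-check\n")
	for i, addr := range addrs {
		// One "server" line per provisioned machine, all serving the API on 6443.
		fmt.Fprintf(&b, "  server node%d %s:6443 check\n", i, addr)
	}
	return b.String()
}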
diff --git a/e2e/util/poolprovisioner/docker/loadbalancer.go b/e2e/util/poolprovisioner/docker/loadbalancer.go new file mode 100644 index 000000000..321ba7e9d --- /dev/null +++ b/e2e/util/poolprovisioner/docker/loadbalancer.go @@ -0,0 +1,185 @@ +package docker + +import ( + "archive/tar" + "bytes" + "context" + "fmt" + "io" + "os" + + dockercontainer "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/image" + "github.com/docker/docker/client" + "github.com/docker/go-connections/nat" + "github.com/k0sproject/k0smotron/e2e/util/poolprovisioner" +) + +var loadBalancerConfig = ` +global + log stdout format raw local0 + +defaults + mode tcp + log global + timeout connect 5s + timeout client 50s + timeout server 50s + retries 3 + option redispatch + +frontend k8s + mode tcp + bind *:6443 + default_backend remote_nodes + +backend remote_nodes + mode tcp + balance roundrobin + option tcp-check + server node0 %s:6443 check + server node1 %s:6443 check + server node2 %s:6443 check + server node3 %s:6443 check + server node4 %s:6443 check +` + +var lbIPAddress string + +func (d *Provisioner) createLoadBalancer(ctx context.Context, apiClient *client.Client, networkName string, remoteMachinesIPAddresses []string) error { + fmt.Println("Creating load balancer for workload cluster controlplanes") + + // The config template above declares exactly five backends, so guard against shorter address lists. + if len(remoteMachinesIPAddresses) < 5 { + return fmt.Errorf("load balancer config expects at least 5 machine addresses, got %d", len(remoteMachinesIPAddresses)) + } + + f, err := os.CreateTemp("", "") + if err != nil { + return fmt.Errorf("create temp file: %w", err) + } + haproxyCfgPath := f.Name() + f.Close() + + renderedConfig := fmt.Sprintf(loadBalancerConfig, + remoteMachinesIPAddresses[0], + remoteMachinesIPAddresses[1], + remoteMachinesIPAddresses[2], + remoteMachinesIPAddresses[3], + remoteMachinesIPAddresses[4], + ) + if err := os.WriteFile(haproxyCfgPath, []byte(renderedConfig), 0644); err != nil { + return fmt.Errorf("write haproxy config: %w", err) + } + + imageName := "haproxy:2.9" + + reader, err := apiClient.ImagePull(ctx, imageName, image.PullOptions{}) + if err != nil { + return fmt.Errorf("pull image: %w", err) + } + io.Copy(io.Discard, reader) + reader.Close() + + exposedPort := nat.Port("6443/tcp") + + containerConfig := &dockercontainer.Config{ + Image: imageName, + ExposedPorts: nat.PortSet{ + exposedPort: {}, + }, + } + + hostConfig := &dockercontainer.HostConfig{ + NetworkMode: dockercontainer.NetworkMode(networkName), + PortBindings: nat.PortMap{ + exposedPort: []nat.PortBinding{ + { + HostIP: "0.0.0.0", + HostPort: "", + }, + }, + }, + } + + resp, err := apiClient.ContainerCreate(ctx, containerConfig, hostConfig, nil, nil, GetLoadBalancerName()) + if err != nil { + return fmt.Errorf("create container: %w", err) + } + + tarBytes, err := tarFile(haproxyCfgPath, "haproxy.cfg") + if err != nil { + return fmt.Errorf("tar file: %w", err) + } + + err = apiClient.CopyToContainer( + ctx, + resp.ID, + "/usr/local/etc/haproxy/", + bytes.NewReader(tarBytes), + dockercontainer.CopyToContainerOptions{AllowOverwriteDirWithFile: true}, + ) + if err != nil { + return fmt.Errorf("copy to container: %w", err) + } + + if err := apiClient.ContainerStart(ctx, resp.ID, dockercontainer.StartOptions{}); err != nil { + return fmt.Errorf("start container: %w", err) + } + + ip, err := waitForContainerIP(ctx, apiClient, resp.ID, networkName) + if err != nil { + return fmt.Errorf("failed to get container IP %q: %v", resp.ID, err) + } + + lbIPAddress = ip + d.lb = &poolprovisioner.VM{ + ContainerName: GetLoadBalancerName(), + ContainerID: resp.ID, + IPAddress: lbIPAddress, + } + + fmt.Printf("Load balancer started at %s:6443\n", ip) + return nil +} + +// GetLoadBalancerName returns 
the name of the load balancer container. +func GetLoadBalancerName() string { + return "haproxy-proxy" +} + +// GetLoadBalancerIPAddress returns the IP address of the load balancer. +func GetLoadBalancerIPAddress() string { + return lbIPAddress +} + +func tarFile(srcPath, dstName string) ([]byte, error) { + var buf bytes.Buffer + tw := tar.NewWriter(&buf) + + info, err := os.Stat(srcPath) + if err != nil { + return nil, err + } + + hdr := &tar.Header{ + Name: dstName, + Mode: 0644, + Size: info.Size(), + } + + if err := tw.WriteHeader(hdr); err != nil { + return nil, err + } + + f, err := os.Open(srcPath) + if err != nil { + return nil, err + } + defer f.Close() + + if _, err := io.Copy(tw, f); err != nil { + return nil, err + } + + if err := tw.Close(); err != nil { + return nil, err + } + + return buf.Bytes(), nil +} diff --git a/e2e/util/poolprovisioner/docker/logcollector.go b/e2e/util/poolprovisioner/docker/logcollector.go new file mode 100644 index 000000000..611758817 --- /dev/null +++ b/e2e/util/poolprovisioner/docker/logcollector.go @@ -0,0 +1,209 @@ +package docker + +import ( + "bytes" + "context" + "fmt" + "os" + osExec "os/exec" + "path/filepath" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + "sigs.k8s.io/cluster-api/controllers/external" + expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" + "sigs.k8s.io/cluster-api/test/infrastructure/container" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/kind/pkg/errors" +) + +// RemoteMachineLogCollector collects logs from remote machines. +type RemoteMachineLogCollector struct { + Provisioner *Provisioner +} + +// CollectMachineLog collects logs from the given machine and writes them to the outputPath. +func (k RemoteMachineLogCollector) CollectMachineLog(ctx context.Context, c client.Client, m *clusterv1.Machine, outputPath string) error { + containerRuntime, err := container.NewDockerClient() + if err != nil { + return err + } + ctx = container.RuntimeInto(ctx, containerRuntime) + + containerIP, err := getContainerIP(ctx, c, m) + if err != nil { + return err + } + + // The presence of the control plane label marks the machine as a control plane node. + _, isControlPlaneNode := m.Labels[clusterv1.MachineControlPlaneLabel] + + for _, vm := range k.Provisioner.remoteMachines { + if containerIP == vm.IPAddress { + return k.collectLogsFromNode(ctx, outputPath, vm.ContainerName, isControlPlaneNode) + } + } + return fmt.Errorf("no containers found for machine %s", m.Name) +} + +// CollectMachinePoolLog is a no-op for the docker provisioner as the cluster templates do not declare machine pools yet. +func (k RemoteMachineLogCollector) CollectMachinePoolLog(_ context.Context, _ client.Client, _ *expv1.MachinePool, _ string) error { + return nil +} + +// CollectInfrastructureLogs collects infrastructure logs and writes them to the outputPath.
+func (k RemoteMachineLogCollector) CollectInfrastructureLogs(ctx context.Context, _ client.Client, _ *clusterv1.Cluster, outputPath string) error { + containerRuntime, err := container.NewDockerClient() + if err != nil { + return err + } + ctx = container.RuntimeInto(ctx, containerRuntime) + + lbContainerName := k.Provisioner.lb.ContainerName + + f, err := fileOnHost(filepath.Join(outputPath, fmt.Sprintf("%s.log", lbContainerName))) + if err != nil { + return err + } + + defer f.Close() + + return containerRuntime.ContainerDebugInfo(ctx, lbContainerName, f) +} + +// fileOnHost is a helper to create a file at path +// even if the parent directory doesn't exist, +// in which case it is created with mode 0750. +func fileOnHost(path string) (*os.File, error) { + if err := os.MkdirAll(filepath.Dir(path), 0750); err != nil { + return nil, err + } + return os.Create(path) //nolint:gosec // No security issue: path is safe. +} + +func getContainerIP(ctx context.Context, c client.Client, m *clusterv1.Machine) (string, error) { + if m == nil { + return "", nil + } + remoteMachineRef := &corev1.ObjectReference{ + APIVersion: m.Spec.InfrastructureRef.APIVersion, + Kind: m.Spec.InfrastructureRef.Kind, + Namespace: m.Namespace, + Name: m.Spec.InfrastructureRef.Name, + } + uRemoteMachine, err := external.Get(ctx, c, remoteMachineRef) + if err != nil { + return "", err + } + ip, found, err := unstructured.NestedString(uRemoteMachine.Object, "spec", "address") + if err != nil { + return "", err + } + if !found { + return "", fmt.Errorf("no address found in remote machine %s/%s", m.Namespace, m.Name) + } + return ip, nil +} + +// From https://github.com/kubernetes-sigs/cluster-api/blob/main/test/framework/docker_logcollector.go#L106 +// collectLogsFromNode collects logs from the specified container and writes them to outputPath. +func (k RemoteMachineLogCollector) collectLogsFromNode(ctx context.Context, outputPath string, containerName string, isControlPlaneNode bool) error { + containerRuntime, err := container.RuntimeFrom(ctx) + if err != nil { + return errors.Wrap(err, "Failed to collect logs from node") + } + + execToPathFn := func(outputFileName, command string, args ...string) func() error { + return func() error { + f, err := fileOnHost(filepath.Join(outputPath, outputFileName)) + if err != nil { + return err + } + defer f.Close() + execConfig := container.ExecContainerInput{ + OutputBuffer: f, + } + return containerRuntime.ExecContainer(ctx, containerName, &execConfig, command, args...) + } + } + copyDirFn := func(containerDir, dirName string) func() error { + return func() error { + f, err := os.CreateTemp("", containerName) + if err != nil { + return err + } + + tempfileName := f.Name() + outputDir := filepath.Join(outputPath, dirName) + + defer os.Remove(tempfileName) + + // Capture stderr in a buffer so it can be attached to the returned error. + errBuf := &bytes.Buffer{} + execConfig := container.ExecContainerInput{ + OutputBuffer: f, + ErrorBuffer: errBuf, + } + err = containerRuntime.ExecContainer( + ctx, + containerName, + &execConfig, + "tar", "--hard-dereference", "--dereference", "--directory", containerDir, "--create", "--file", "-", ".", + ) + if err != nil { + return errors.Wrap(err, errBuf.String()) + } + + err = os.MkdirAll(outputDir, 0750) + if err != nil { + return err + } + + return osExec.Command("tar", "--extract", "--file", tempfileName, "--directory", outputDir).Run() //nolint:gosec // We don't care about command injection here.
+ } + } + + serviceName := "k0scontroller.service" + if !isControlPlaneNode { + serviceName = "k0sworker.service" + } + + collectFuncs := []func() error{ + execToPathFn( + "journal.log", + "journalctl", "--no-pager", "--output=short-precise", + ), + execToPathFn( + "kern.log", + "journalctl", "--no-pager", "--output=short-precise", "-k", + ), + execToPathFn( + "kubelet-version.txt", + "kubelet", "--version", + ), + execToPathFn( + "kubelet.log", + "journalctl", "--no-pager", "--output=short-precise", "-u", "kubelet.service", + ), + execToPathFn( + "containerd-info.txt", + "crictl", "info", + ), + execToPathFn( + "containerd.log", + "journalctl", "--no-pager", "--output=short-precise", "-u", "containerd.service", + ), + execToPathFn( + "k0s.log", + "journalctl", "--no-pager", "--output=short-precise", "-u", serviceName, + ), + copyDirFn("/var/log/pods", "pods"), + } + + return errors.AggregateConcurrent(collectFuncs) +} diff --git a/e2e/util/poolprovisioner/docker/pool.go b/e2e/util/poolprovisioner/docker/pool.go new file mode 100644 index 000000000..6fcc71b07 --- /dev/null +++ b/e2e/util/poolprovisioner/docker/pool.go @@ -0,0 +1,241 @@ +package docker + +import ( + "context" + "fmt" + "io" + "strings" + "time" + + dockercontainer "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/image" + "github.com/docker/docker/api/types/network" + "github.com/docker/docker/client" + "github.com/docker/go-connections/nat" + "github.com/k0sproject/k0smotron/e2e/util/poolprovisioner" +) + +// Provisioner implements a pool provisioner using Docker containers as VMs. +type Provisioner struct { + remoteMachines []poolprovisioner.VM + lb *poolprovisioner.VM +} + +// Provision creates a number of Docker containers with the specified node version and records their addresses on the provisioner. +func (d *Provisioner) Provision(ctx context.Context, replicas int, nodeVersion string, publicKey []byte) error { + apiClient, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation()) + if err != nil { + return fmt.Errorf("create Docker client: %v", err) + } + defer apiClient.Close() + + info, err := apiClient.Info(ctx) + if err != nil { + return fmt.Errorf("unable to inspect Docker engine info: %v", err) + } + + networks, err := apiClient.NetworkList(ctx, network.ListOptions{}) + if err != nil { + return fmt.Errorf("list Docker networks: %v", err) + } + + // The network must be "kind" because the management cluster, where the controllers run, is attached to the "kind" network.
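+ // If the network does not exist yet (e.g. kind has not created it on this host), it is created + // below with IP masquerading enabled so the machine containers can reach the outside world.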
+ networkName := "kind" + existsKindNetwork := false + for _, n := range networks { + if n.Name == networkName { + existsKindNetwork = true + break + } + } + + if !existsKindNetwork { + _, err = apiClient.NetworkCreate(ctx, networkName, network.CreateOptions{ + Driver: "bridge", + Options: map[string]string{ + "com.docker.network.bridge.enable_ip_masquerade": "true", + }, + }) + if err != nil { + return fmt.Errorf("create %s network: %v", networkName, err) + } + } + + imageName := fmt.Sprintf("kindest/node:%s", nodeVersion) + + reader, err := apiClient.ImagePull(ctx, imageName, image.PullOptions{}) + if err != nil { + return fmt.Errorf("pull image: %w", err) + } + io.Copy(io.Discard, reader) + reader.Close() + + remoteMachinesIPAddresses := []string{} + fmt.Println("Creating a pool of Docker containers as machines...") + for i := 0; i < replicas; i++ { + + containerConfig := &dockercontainer.Config{ + Image: imageName, + Tty: true, + Volumes: map[string]struct{}{ + "/var": {}, + }, + } + + hostConfig := &dockercontainer.HostConfig{ + Privileged: true, + SecurityOpt: []string{"seccomp=unconfined", "apparmor=unconfined"}, + CgroupnsMode: "private", + NetworkMode: dockercontainer.NetworkMode(networkName), + Tmpfs: map[string]string{ + "/tmp": "", + "/run": "", + }, + PortBindings: nat.PortMap{}, + } + + hostConfig.Binds = append(hostConfig.Binds, + "/lib/modules:/lib/modules:ro", + "/var/run/containerd/containerd.sock:/var/run/containerd/containerd.sock:ro", + ) + + if info.Driver == "btrfs" || info.Driver == "zfs" { + hostConfig.Binds = append(hostConfig.Binds, "/dev/mapper:/dev/mapper:ro") + } + + for _, sec := range info.SecurityOptions { + if strings.Contains(sec, "rootless") { + hostConfig.Devices = append(hostConfig.Devices, dockercontainer.DeviceMapping{PathOnHost: "/dev/fuse"}) + break + } + } + + networkConfig := &network.NetworkingConfig{ + EndpointsConfig: map[string]*network.EndpointSettings{ + networkName: { + Aliases: []string{fmt.Sprintf("remote-machine-%d", i)}, + }, + }, + } + + name := fmt.Sprintf("remote-machine-%d", i) + resp, err := apiClient.ContainerCreate(ctx, containerConfig, hostConfig, networkConfig, nil, name) + if err != nil { + return fmt.Errorf("create container %q: %v", name, err) + } + + if err := apiClient.ContainerStart(ctx, resp.ID, dockercontainer.StartOptions{}); err != nil { + return fmt.Errorf("start container %q: %v", name, err) + } + + runSSH := "apt-get update && apt-get install -y openssh-server && mkdir -p /var/run/sshd && /usr/sbin/sshd" + addKey := fmt.Sprintf( + "mkdir -p /root/.ssh && echo '%s' >> /root/.ssh/authorized_keys && chmod 700 /root/.ssh && chmod 600 /root/.ssh/authorized_keys", + strings.TrimSpace(string(publicKey)), + ) + cmd := fmt.Sprintf("%s && %s", runSSH, addKey) + execConfig := dockercontainer.ExecOptions{ + Cmd: []string{"bash", "-c", cmd}, + AttachStdout: true, + AttachStderr: true, + } + execResp, err := apiClient.ContainerExecCreate(ctx, resp.ID, execConfig) + if err != nil { + return fmt.Errorf("create exec in container %q: %v", name, err) + } + if err := apiClient.ContainerExecStart(ctx, execResp.ID, dockercontainer.ExecStartOptions{}); err != nil { + return fmt.Errorf("exec start in container %q: %v", name, err) + } + + ip, err := waitForContainerIP(ctx, apiClient, resp.ID, networkName) + if err != nil { + return fmt.Errorf("failed to get container IP %q: %v", name, err) + } + d.remoteMachines = append(d.remoteMachines, poolprovisioner.VM{ + ContainerName: name, + ContainerID: resp.ID, + IPAddress: ip, + }) + 
remoteMachinesIPAddresses = append(remoteMachinesIPAddresses, ip) + + fmt.Printf("Created container %q with IP %s\n", name, ip) + } + fmt.Println("Created machines pool.") + + if len(d.remoteMachines) != replicas { + return fmt.Errorf("expected %d addresses, got %d", replicas, len(d.remoteMachines)) + } + + err = d.createLoadBalancer(ctx, apiClient, networkName, remoteMachinesIPAddresses) + if err != nil { + return fmt.Errorf("create load balancer: %v", err) + } + + return nil +} + +// GetRemoteMachinesAddresses returns the IP addresses of the provisioned Docker container VMs. +func (d *Provisioner) GetRemoteMachinesAddresses() []string { + var addresses []string + for _, vm := range d.remoteMachines { + addresses = append(addresses, vm.IPAddress) + } + return addresses +} + +// Clean removes all the Docker container VMs created by the provisioner (including the load balancer). +func (d *Provisioner) Clean(ctx context.Context) error { + apiClient, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation()) + if err != nil { + return fmt.Errorf("create Docker client: %v", err) + } + defer apiClient.Close() + + fmt.Println("Cleaning up remote machines...") + for _, vm := range d.remoteMachines { + timeout := 10 + err := apiClient.ContainerStop(ctx, vm.ContainerID, dockercontainer.StopOptions{Timeout: &timeout}) + if err != nil { + return fmt.Errorf("stop container %q: %v", vm.ContainerName, err) + } + + err = apiClient.ContainerRemove(ctx, vm.ContainerID, dockercontainer.RemoveOptions{ + Force: true, + }) + if err != nil { + return fmt.Errorf("remove container %q: %v", vm.ContainerName, err) + } + + fmt.Printf("Removed container %q\n", vm.ContainerName) + } + + if d.lb == nil { + return nil + } + fmt.Println("Removing load balancer...") + err = apiClient.ContainerRemove(ctx, d.lb.ContainerID, dockercontainer.RemoveOptions{ + Force: true, + }) + if err != nil { + return fmt.Errorf("remove load balancer container %q: %v", GetLoadBalancerName(), err) + } + fmt.Println("Load balancer removed.") + return nil +} + +func waitForContainerIP(ctx context.Context, cli *client.Client, id string, network string) (string, error) { + deadline := time.Now().Add(2 * time.Minute) + for time.Now().Before(deadline) { + inspect, err := cli.ContainerInspect(ctx, id) + if err != nil { + return "", err + } + if net, ok := inspect.NetworkSettings.Networks[network]; ok { + if net.IPAddress != "" { + return net.IPAddress, nil + } + } + time.Sleep(500 * time.Millisecond) + } + return "", fmt.Errorf("timeout waiting for IP of container %s", id) +} diff --git a/e2e/util/poolprovisioner/provisioner.go b/e2e/util/poolprovisioner/provisioner.go new file mode 100644 index 000000000..8f1f2f35a --- /dev/null +++ b/e2e/util/poolprovisioner/provisioner.go @@ -0,0 +1,26 @@ +package poolprovisioner + +import ( + "context" +) + +// VM represents a virtual machine created by the provisioner. +type VM struct { + ContainerName string + ContainerID string + IPAddress string +} + +// PoolProvisioner is the global pool provisioner instance. +var PoolProvisioner Provisioner + +// Provisioner is the interface that pool provisioners must implement. +type Provisioner interface { + // Provision creates a number of virtual machines running the specified node version + // and records their addresses on the provisioner. + Provision(ctx context.Context, replicas int, nodeVersion string, publicKey []byte) error + // Clean removes all the virtual machines created by the provisioner.
+ Clean(ctx context.Context) error + // GetRemoteMachinesAddresses returns the IP addresses of the provisioned virtual machines. + GetRemoteMachinesAddresses() []string +} diff --git a/e2e/workload_cluster_inplace_upgrade_test.go b/e2e/workload_cluster_inplace_upgrade_test.go index b1dca545e..0ccdfc1a6 100644 --- a/e2e/workload_cluster_inplace_upgrade_test.go +++ b/e2e/workload_cluster_inplace_upgrade_test.go @@ -59,15 +59,14 @@ func workloadClusterInplaceUpgradeSpec(t *testing.T) { ClusterctlConfigPath: clusterctlConfigPath, KubeconfigPath: bootstrapClusterProxy.GetKubeconfigPath(), // no flavor specified, so it will use the default one "cluster-template" - Flavor: "", - + Flavor: "", Namespace: namespace.Name, ClusterName: clusterName, KubernetesVersion: e2eConfig.MustGetVariable(KubernetesVersion), - ControlPlaneMachineCount: ptr.To[int64](3), - // TODO: make infra provider configurable - InfrastructureProvider: "docker", - LogFolder: filepath.Join(artifactFolder, "clusters", bootstrapClusterProxy.GetName()), + ControlPlaneMachineCount: ptr.To(int64(controlPlaneMachineCount)), + WorkerMachineCount: ptr.To(int64(workerMachineCount)), + InfrastructureProvider: infrastructureProvider, + LogFolder: filepath.Join(artifactFolder, "clusters", bootstrapClusterProxy.GetName()), ClusterctlVariables: map[string]string{ "CLUSTER_NAME": clusterName, "NAMESPACE": namespace.Name, @@ -76,6 +75,8 @@ func workloadClusterInplaceUpgradeSpec(t *testing.T) { }) require.NotNil(t, workloadClusterTemplate) + fmt.Println(string(workloadClusterTemplate)) + require.Eventually(t, func() bool { return bootstrapClusterProxy.CreateOrUpdate(ctx, workloadClusterTemplate) == nil }, 10*time.Second, 1*time.Second, "Failed to apply the cluster template") @@ -99,6 +100,7 @@ func workloadClusterInplaceUpgradeSpec(t *testing.T) { util.GetInterval(e2eConfig, testName, "wait-delete-cluster"), skipCleanup, clusterctlConfigPath, + infrastructureProvider, ) }() @@ -116,7 +118,7 @@ func workloadClusterInplaceUpgradeSpec(t *testing.T) { ClusterProxy: bootstrapClusterProxy, Cluster: cluster, ControlPlane: controlPlane, - KubernetesUpgradeVersion: e2eConfig.MustGetVariable(KubernetesVersionFirstUpgradeTo), + KubernetesUpgradeVersion: e2eConfig.MustGetVariable(K0sVersionFirstUpgradeTo), WaitForKubeProxyUpgradeInterval: util.GetInterval(e2eConfig, testName, "wait-kube-proxy-upgrade"), WaitForControlPlaneReadyInterval: util.GetInterval(e2eConfig, testName, "wait-control-plane"), }) @@ -127,7 +129,7 @@ func workloadClusterInplaceUpgradeSpec(t *testing.T) { ClusterProxy: bootstrapClusterProxy, Cluster: cluster, ControlPlane: controlPlane, - KubernetesUpgradeVersion: e2eConfig.MustGetVariable(KubernetesVersionSecondUpgradeTo), + KubernetesUpgradeVersion: e2eConfig.MustGetVariable(K0sVersionSecondUpgradeTo), WaitForKubeProxyUpgradeInterval: util.GetInterval(e2eConfig, testName, "wait-kube-proxy-upgrade"), WaitForControlPlaneReadyInterval: util.GetInterval(e2eConfig, testName, "wait-control-plane"), }) diff --git a/e2e/workload_cluster_recreate_delete_first_upgrade_test.go b/e2e/workload_cluster_recreate_delete_first_upgrade_test.go index 92569f75c..909345438 100644 --- a/e2e/workload_cluster_recreate_delete_first_upgrade_test.go +++ b/e2e/workload_cluster_recreate_delete_first_upgrade_test.go @@ -59,15 +59,14 @@ func workloadClusterRecreateDeleteFirstUpgradeSpec(t *testing.T) { ClusterctlConfigPath: clusterctlConfigPath, KubeconfigPath: bootstrapClusterProxy.GetKubeconfigPath(), // no flavor specified, so it will use the default one 
"cluster-template" - Flavor: "", - + Flavor: "", Namespace: namespace.Name, ClusterName: clusterName, KubernetesVersion: e2eConfig.MustGetVariable(KubernetesVersion), - ControlPlaneMachineCount: ptr.To[int64](3), - // TODO: make infra provider configurable - InfrastructureProvider: "docker", - LogFolder: filepath.Join(artifactFolder, "clusters", bootstrapClusterProxy.GetName()), + ControlPlaneMachineCount: ptr.To(int64(controlPlaneMachineCount)), + WorkerMachineCount: ptr.To(int64(workerMachineCount)), + InfrastructureProvider: infrastructureProvider, + LogFolder: filepath.Join(artifactFolder, "clusters", bootstrapClusterProxy.GetName()), ClusterctlVariables: map[string]string{ "CLUSTER_NAME": clusterName, "NAMESPACE": namespace.Name, @@ -99,6 +98,7 @@ func workloadClusterRecreateDeleteFirstUpgradeSpec(t *testing.T) { util.GetInterval(e2eConfig, testName, "wait-delete-cluster"), skipCleanup, clusterctlConfigPath, + infrastructureProvider, ) }() @@ -113,7 +113,7 @@ func workloadClusterRecreateDeleteFirstUpgradeSpec(t *testing.T) { ClusterProxy: bootstrapClusterProxy, Cluster: cluster, ControlPlane: controlPlane, - KubernetesUpgradeVersion: e2eConfig.MustGetVariable(KubernetesVersionFirstUpgradeTo), + KubernetesUpgradeVersion: e2eConfig.MustGetVariable(K0sVersionFirstUpgradeTo), WaitForKubeProxyUpgradeInterval: util.GetInterval(e2eConfig, testName, "wait-kube-proxy-upgrade"), WaitForControlPlaneReadyInterval: util.GetInterval(e2eConfig, testName, "wait-control-plane"), }) @@ -124,7 +124,7 @@ func workloadClusterRecreateDeleteFirstUpgradeSpec(t *testing.T) { ClusterProxy: bootstrapClusterProxy, Cluster: cluster, ControlPlane: controlPlane, - KubernetesUpgradeVersion: e2eConfig.MustGetVariable(KubernetesVersionSecondUpgradeTo), + KubernetesUpgradeVersion: e2eConfig.MustGetVariable(K0sVersionSecondUpgradeTo), WaitForKubeProxyUpgradeInterval: util.GetInterval(e2eConfig, testName, "wait-kube-proxy-upgrade"), WaitForControlPlaneReadyInterval: util.GetInterval(e2eConfig, testName, "wait-control-plane"), }) diff --git a/e2e/workload_cluster_recreate_upgrade_test.go b/e2e/workload_cluster_recreate_upgrade_test.go index 8e1232fc9..dc78f1069 100644 --- a/e2e/workload_cluster_recreate_upgrade_test.go +++ b/e2e/workload_cluster_recreate_upgrade_test.go @@ -59,15 +59,14 @@ func workloadClusterRecreateUpgradeSpec(t *testing.T) { ClusterctlConfigPath: clusterctlConfigPath, KubeconfigPath: bootstrapClusterProxy.GetKubeconfigPath(), // no flavor specified, so it will use the default one "cluster-template" - Flavor: "", - + Flavor: "", Namespace: namespace.Name, ClusterName: clusterName, KubernetesVersion: e2eConfig.MustGetVariable(KubernetesVersion), - ControlPlaneMachineCount: ptr.To[int64](3), - // TODO: make infra provider configurable - InfrastructureProvider: "docker", - LogFolder: filepath.Join(artifactFolder, "clusters", bootstrapClusterProxy.GetName()), + ControlPlaneMachineCount: ptr.To(int64(controlPlaneMachineCount)), + WorkerMachineCount: ptr.To(int64(workerMachineCount)), + InfrastructureProvider: infrastructureProvider, + LogFolder: filepath.Join(artifactFolder, "clusters", bootstrapClusterProxy.GetName()), ClusterctlVariables: map[string]string{ "CLUSTER_NAME": clusterName, "NAMESPACE": namespace.Name, @@ -99,6 +98,7 @@ func workloadClusterRecreateUpgradeSpec(t *testing.T) { util.GetInterval(e2eConfig, testName, "wait-delete-cluster"), skipCleanup, clusterctlConfigPath, + infrastructureProvider, ) }() @@ -113,7 +113,7 @@ func workloadClusterRecreateUpgradeSpec(t *testing.T) { ClusterProxy: 
bootstrapClusterProxy, Cluster: cluster, ControlPlane: controlPlane, - KubernetesUpgradeVersion: e2eConfig.MustGetVariable(KubernetesVersionFirstUpgradeTo), + KubernetesUpgradeVersion: e2eConfig.MustGetVariable(K0sVersionFirstUpgradeTo), WaitForKubeProxyUpgradeInterval: util.GetInterval(e2eConfig, testName, "wait-kube-proxy-upgrade"), WaitForControlPlaneReadyInterval: util.GetInterval(e2eConfig, testName, "wait-control-plane"), }) @@ -124,7 +124,7 @@ func workloadClusterRecreateUpgradeSpec(t *testing.T) { ClusterProxy: bootstrapClusterProxy, Cluster: cluster, ControlPlane: controlPlane, - KubernetesUpgradeVersion: e2eConfig.MustGetVariable(KubernetesVersionSecondUpgradeTo), + KubernetesUpgradeVersion: e2eConfig.MustGetVariable(K0sVersionSecondUpgradeTo), WaitForKubeProxyUpgradeInterval: util.GetInterval(e2eConfig, testName, "wait-kube-proxy-upgrade"), WaitForControlPlaneReadyInterval: util.GetInterval(e2eConfig, testName, "wait-control-plane"), }) diff --git a/internal/controller/controlplane/helper.go b/internal/controller/controlplane/helper.go index 18c251400..8f72bb40d 100644 --- a/internal/controller/controlplane/helper.go +++ b/internal/controller/controlplane/helper.go @@ -364,7 +364,6 @@ func hasControllerConfigChanged(bootstrapConfigs map[string]bootstrapv1.K0sContr machineK0sConfig.K0s = nil return cmp.Diff(kcpK0sConfig, machineK0sConfig) != "" - } // Deprecated: This function is kept for backward compatibility with clusters created with versions that does not add an annotation in the diff --git a/internal/controller/controlplane/k0s_controlplane_controller.go b/internal/controller/controlplane/k0s_controlplane_controller.go index 0e5d54d8d..fc7d86cff 100644 --- a/internal/controller/controlplane/k0s_controlplane_controller.go +++ b/internal/controller/controlplane/k0s_controlplane_controller.go @@ -21,10 +21,11 @@ import ( "errors" "fmt" "reflect" - "sigs.k8s.io/controller-runtime/pkg/controller" "strings" "time" + "sigs.k8s.io/controller-runtime/pkg/controller" + "github.com/google/uuid" autopilot "github.com/k0sproject/k0s/pkg/apis/autopilot/v1beta2" "github.com/k0sproject/k0smotron/internal/controller/util" diff --git a/internal/controller/infrastructure/ssh_provisioner.go b/internal/controller/infrastructure/ssh_provisioner.go index d368cddd8..c5822c16b 100644 --- a/internal/controller/infrastructure/ssh_provisioner.go +++ b/internal/controller/infrastructure/ssh_provisioner.go @@ -104,9 +104,11 @@ func (p *SSHProvisioner) Provision(ctx context.Context) error { // Write files first for _, file := range p.cloudInit.Files { + p.log.Info("Uploading file", "path", file.Path, "permissions", file.Permissions) if err := p.uploadFile(rigClient, file); err != nil { return fmt.Errorf("failed to upload file: %w", err) } + p.log.Info("Uploaded file", "path", file.Path, "permissions", file.Permissions) } if p.machine.Spec.CommandsAsScript { @@ -250,6 +252,5 @@ func (p *SSHProvisioner) uploadFile(client *rig.Client, file provisioner.File) e return fmt.Errorf("failed to write to remote file: %w", err) } - p.log.Info("uploaded file", "path", file.Path, "permissions", perms) return nil } diff --git a/inttest/capi-remote-machine-template-update/capi_remote_machine_template_update_test.go b/inttest/capi-remote-machine-template-update/capi_remote_machine_template_update_test.go deleted file mode 100644 index 9e510932e..000000000 --- a/inttest/capi-remote-machine-template-update/capi_remote_machine_template_update_test.go +++ /dev/null @@ -1,432 +0,0 @@ -/* -Copyright 2023. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package capiremotemachinetemplate - -import ( - "bytes" - "context" - "crypto/rand" - "crypto/rsa" - "crypto/x509" - "encoding/base64" - "encoding/json" - "encoding/pem" - "fmt" - "os" - "os/exec" - "strconv" - "strings" - "testing" - "text/template" - "time" - - "k8s.io/apimachinery/pkg/util/wait" - - "github.com/k0sproject/k0s/inttest/common" - infra "github.com/k0sproject/k0smotron/api/infrastructure/v1beta1" - "github.com/k0sproject/k0smotron/inttest/util" - - "github.com/stretchr/testify/require" - "github.com/stretchr/testify/suite" - "golang.org/x/crypto/ssh" - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/util/yaml" - "k8s.io/client-go/kubernetes" - "k8s.io/client-go/rest" - "k8s.io/client-go/tools/clientcmd" -) - -type RemoteMachineTemplateUpdateSuite struct { - common.FootlooseSuite - - client *kubernetes.Clientset - restConfig *rest.Config - clusterYamlsPath string - updatedClusterYamlsPath string - privateKey []byte - publicKey []byte -} - -func (s *RemoteMachineTemplateUpdateSuite) SetupSuite() { - s.FootlooseSuite.SetupSuite() -} - -func TestRemoteMachineSuite(t *testing.T) { - kubeConfigPath := os.Getenv("KUBECONFIG") - require.NotEmpty(t, kubeConfigPath, "KUBECONFIG env var must be set and point to kind cluster") - // Get kube client from kubeconfig - restCfg, err := clientcmd.BuildConfigFromFlags("", kubeConfigPath) - require.NoError(t, err) - require.NotNil(t, restCfg) - - // Get kube client from kubeconfig - kubeClient, err := kubernetes.NewForConfig(restCfg) - require.NoError(t, err) - require.NotNil(t, kubeClient) - - // Create keypair to use with SSH - privateKey, err := rsa.GenerateKey(rand.Reader, 2048) - require.NoError(t, err) - - // Convert the private key to PEM format - privateKeyBytes := x509.MarshalPKCS1PrivateKey(privateKey) - privateKeyPEM := pem.EncodeToMemory(&pem.Block{ - Type: "RSA PRIVATE KEY", - Bytes: privateKeyBytes, - }) - - // Extract the public key from the private key - publicKey := &privateKey.PublicKey - - // Convert the public key to the OpenSSH format - sshPublicKey, err := ssh.NewPublicKey(publicKey) - require.NoError(t, err) - sshPublicKeyBytes := ssh.MarshalAuthorizedKey(sshPublicKey) - - tmpDir := t.TempDir() - - s := RemoteMachineTemplateUpdateSuite{ - common.FootlooseSuite{ - ControllerCount: 0, - WorkerCount: 0, - K0smotronWorkerCount: 1, - K0smotronNetworks: []string{"kind"}, - }, - kubeClient, - restCfg, - tmpDir + "/cluster.yaml", - tmpDir + "/updated-cluster.yaml", - privateKeyPEM, - sshPublicKeyBytes, - } - suite.Run(t, &s) -} - -func (s *RemoteMachineTemplateUpdateSuite) TestCAPIRemoteMachine() { - ctx := s.Context() - // Push public key to worker authorized_keys - workerSSH, err := s.SSH(ctx, s.K0smotronNode(0)) - s.Require().NoError(err) - defer workerSSH.Disconnect() - s.T().Log("Pushing public key to worker") - s.Require().NoError(workerSSH.Exec(s.Context(), "cat >>/root/.ssh/authorized_keys", common.SSHStreams{In: bytes.NewReader(s.publicKey)})) - - s.Require().NoError(err) - defer 
func() { - keep := os.Getenv("KEEP_AFTER_TEST") - if keep == "true" { - return - } - if keep == "on-failure" && s.T().Failed() { - return - } - s.T().Log("Deleting cluster objects") - s.Require().NoError(util.DeleteCluster("remote-test-cluster")) - }() - - s.createCluster() - - s.T().Log("cluster objects applied, waiting for cluster to be ready") - var localPort int - // nolint:staticcheck - err = wait.PollImmediateUntilWithContext(ctx, 1*time.Second, func(ctx context.Context) (bool, error) { - localPort, _ = getLBPort("TestRemoteMachineSuite-k0smotron0") - return localPort > 0, nil - }) - s.Require().NoError(err) - s.T().Log("waiting to see admin kubeconfig secret") - s.Require().NoError(util.WaitForSecret(ctx, s.client, "remote-test-cluster-kubeconfig", "default")) - kmcKC, err := util.GetKMCClientSet(ctx, s.client, "remote-test-cluster", "default", localPort) - s.Require().NoError(err) - - s.T().Log("verify the RemoteMachine is at expected state") - var rmName string - // nolint:staticcheck - err = wait.PollImmediateUntilWithContext(ctx, 1*time.Second, func(ctx context.Context) (bool, error) { - rm, err := s.findRemoteMachines("default") - if err != nil { - return false, err - } - - if len(rm) == 0 { - return true, nil - } - - rmName = rm[0].GetName() - return true, nil - }) - s.Require().NoError(err) - - // nolint:staticcheck - err = wait.PollImmediateUntilWithContext(ctx, 1*time.Second, func(ctx context.Context) (bool, error) { - rm, err := s.getRemoteMachine(rmName, "default") - if err != nil { - return false, err - } - - expectedProviderID := fmt.Sprintf("remote-machine://%s:22", s.getWorkerIP()) - return rm.Status.Ready && expectedProviderID == rm.Spec.ProviderID, nil - }) - s.Require().NoError(err) - - s.T().Log("waiting for node to be ready") - - machines, err := util.GetControlPlaneMachinesByKcpName(ctx, "remote-test", "default", s.client) - s.Require().NoError(err) - s.Require().Len(machines, 1, "Expected 1 machine for K0sControlPlane remote-test, got %d", len(machines)) - - s.Require().NoError(common.WaitForNodeReadyStatus(ctx, kmcKC, machines[0].GetName(), corev1.ConditionTrue)) - - s.T().Log("update cluster") - s.updateCluster() - // nolint:staticcheck - err = wait.PollImmediateUntilWithContext(ctx, 1*time.Second, func(ctx context.Context) (bool, error) { - output, err := exec.Command("docker", "exec", "TestRemoteMachineSuite-k0smotron0", "k0s", "status").Output() - if err != nil { - return false, nil - } - - return strings.Contains(string(output), "Version: v1.29"), nil - }) - s.Require().NoError(err) - - s.T().Log("waiting for node to be ready in updated cluster") - machines, err = util.GetControlPlaneMachinesByKcpName(ctx, "remote-test", "default", s.client) - s.Require().NoError(err) - s.Require().Len(machines, 1, "Expected 1 machine for K0sControlPlane remote-test, got %d", len(machines)) - - s.Require().NoError(common.WaitForNodeReadyStatus(ctx, kmcKC, machines[0].GetName(), corev1.ConditionTrue)) -} - -func (s *RemoteMachineTemplateUpdateSuite) findRemoteMachines(namespace string) ([]infra.RemoteMachine, error) { - apiPath := fmt.Sprintf("/apis/infrastructure.cluster.x-k8s.io/v1beta1/namespaces/%s/remotemachines", namespace) - result, err := s.client.RESTClient().Get().AbsPath(apiPath).DoRaw(s.Context()) - if err != nil { - return nil, err - } - rm := &infra.RemoteMachineList{} - if err := yaml.Unmarshal(result, rm); err != nil { - return nil, err - } - return rm.Items, nil -} - -func (s *RemoteMachineTemplateUpdateSuite) getRemoteMachine(name string, namespace 
string) (*infra.RemoteMachine, error) {
-	apiPath := fmt.Sprintf("/apis/infrastructure.cluster.x-k8s.io/v1beta1/namespaces/%s/remotemachines/%s", namespace, name)
-	result, err := s.client.RESTClient().Get().AbsPath(apiPath).DoRaw(s.Context())
-	if err != nil {
-		return nil, err
-	}
-	rm := &infra.RemoteMachine{}
-	if err := yaml.Unmarshal(result, rm); err != nil {
-		return nil, err
-	}
-	return rm, nil
-}
-
-func (s *RemoteMachineTemplateUpdateSuite) updateCluster() {
-	out, err := exec.Command("kubectl", "apply", "-f", s.updatedClusterYamlsPath).CombinedOutput()
-	s.Require().NoError(err, "failed to update cluster objects: %s", string(out))
-}
-
-func (s *RemoteMachineTemplateUpdateSuite) createCluster() {
-
-	// Get worker IP
-	workerIP := s.getWorkerIP()
-	s.Require().NotEmpty(workerIP)
-
-	// Get SSH key
-	machines, err := s.InspectMachines([]string{s.K0smotronNode(0)})
-	s.Require().NoError(err)
-	s.Require().NotEmpty(machines)
-
-	// Parse the cluster yaml as template
-	t, err := template.New("cluster").Parse(clusterYaml)
-	s.Require().NoError(err)
-
-	// Execute the template to buffer
-	var clusterYaml bytes.Buffer
-
-	err = t.Execute(&clusterYaml, struct {
-		Address string
-		SSHKey  string
-	}{
-		Address: workerIP,
-		SSHKey:  base64.StdEncoding.EncodeToString(s.privateKey),
-	})
-	s.Require().NoError(err)
-	bytes := clusterYaml.Bytes()
-
-	s.Require().NoError(os.WriteFile(s.clusterYamlsPath, bytes, 0644))
-	out, err := exec.Command("kubectl", "apply", "-f", s.clusterYamlsPath).CombinedOutput()
-	s.Require().NoError(os.WriteFile(s.updatedClusterYamlsPath, []byte(updatedClusterYaml), 0644))
-	s.Require().NoError(err, "failed to update cluster objects: %s", string(out))
-}
-
-func getLBPort(name string) (int, error) {
-	b, err := exec.Command("docker", "inspect", name, "--format", "{{json .NetworkSettings.Ports}}").Output()
-	if err != nil {
-		return 0, fmt.Errorf("failed to get inspect info from container %s: %w", name, err)
-	}
-
-	var ports map[string][]map[string]string
-	err = json.Unmarshal(b, &ports)
-	if err != nil {
-		return 0, fmt.Errorf("failed to unmarshal inspect info from container %s: %w", name, err)
-	}
-
-	return strconv.Atoi(ports["6443/tcp"][0]["HostPort"])
-}
-
-func (s *RemoteMachineTemplateUpdateSuite) getWorkerIP() string {
-	nodeName := s.K0smotronNode(0)
-	ssh, err := s.SSH(s.Context(), nodeName)
-	s.Require().NoError(err)
-	defer ssh.Disconnect()
-
-	ipAddress, err := ssh.ExecWithOutput(s.Context(), "hostname -i")
-	s.Require().NoError(err)
-	return ipAddress
-}
-
-var clusterYaml = `
-apiVersion: controlplane.cluster.x-k8s.io/v1beta1
-kind: K0sControlPlane
-metadata:
-  name: remote-test
-spec:
-  replicas: 1
-  version: v1.28.7+k0s.0
-  k0sConfigSpec:
-    k0s:
-      apiVersion: k0s.k0sproject.io/v1beta1
-      kind: ClusterConfig
-      metadata:
-        name: k0s
-      spec:
-        api:
-          extraArgs:
-            anonymous-auth: "true"
-        telemetry:
-          enabled: false
-    args:
-      - --enable-worker
-      - --no-taints
-  machineTemplate:
-    infrastructureRef:
-      apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
-      kind: RemoteMachineTemplate
-      name: remote-test-cp-template
-      namespace: default
----
-apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
-kind: RemoteCluster
-metadata:
-  name: remote-test
-  namespace: default
-spec:
----
-apiVersion: cluster.x-k8s.io/v1beta1
-kind: Cluster
-metadata:
-  name: remote-test-cluster
-  namespace: default
-spec:
-  clusterNetwork:
-    pods:
-      cidrBlocks:
-      - 192.168.0.0/16
-    serviceDomain: cluster.local
-    services:
-      cidrBlocks:
-      - 10.128.0.0/12
-  controlPlaneEndpoint:
-    host: {{ .Address }}
-    port: 6443
-  controlPlaneRef:
-    apiVersion: controlplane.cluster.x-k8s.io/v1beta1
-    kind: K0sControlPlane
-    name: remote-test
-  infrastructureRef:
-    apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
-    kind: RemoteCluster
-    name: remote-test
----
-apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
-kind: RemoteMachineTemplate
-metadata:
-  name: remote-test-cp-template
-  namespace: default
-spec:
-  template:
-    spec:
-      pool: default
----
-apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
-kind: PooledRemoteMachine
-metadata:
-  name: remote-test-0
-  namespace: default
-spec:
-  pool: default
-  machine:
-    address: {{ .Address }}
-    port: 22
-    user: root
-    sshKeyRef:
-      name: footloose-key
----
-apiVersion: v1
-kind: Secret
-metadata:
-  name: footloose-key
-  namespace: default
-data:
-  value: {{ .SSHKey }}
-type: Opaque
-`
-
-var updatedClusterYaml = `
-apiVersion: controlplane.cluster.x-k8s.io/v1beta1
-kind: K0sControlPlane
-metadata:
-  name: remote-test
-spec:
-  replicas: 1
-  version: v1.29.2+k0s.0
-  k0sConfigSpec:
-    k0s:
-      apiVersion: k0s.k0sproject.io/v1beta1
-      kind: ClusterConfig
-      metadata:
-        name: k0s
-      spec:
-        api:
-          extraArgs:
-            anonymous-auth: "true"
-        telemetry:
-          enabled: false
-    args:
-      - --enable-worker
-      - --no-taints
-  machineTemplate:
-    infrastructureRef:
-      apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
-      kind: RemoteMachineTemplate
-      name: remote-test-cp-template
-      namespace: default
-`
diff --git a/inttest/capi-remote-machine-template/capi_remote_machine_template_test.go b/inttest/capi-remote-machine-template/capi_remote_machine_template_test.go
deleted file mode 100644
index e5a7964b6..000000000
--- a/inttest/capi-remote-machine-template/capi_remote_machine_template_test.go
+++ /dev/null
@@ -1,386 +0,0 @@
-/*
-Copyright 2023.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package capiremotemachinetemplate
-
-import (
-	"bytes"
-	"context"
-	"crypto/rand"
-	"crypto/rsa"
-	"crypto/x509"
-	"encoding/base64"
-	"encoding/json"
-	"encoding/pem"
-	"fmt"
-	"os"
-	"os/exec"
-	"strconv"
-	"testing"
-	"text/template"
-	"time"
-
-	"k8s.io/apimachinery/pkg/util/wait"
-
-	"github.com/k0sproject/k0s/inttest/common"
-	infra "github.com/k0sproject/k0smotron/api/infrastructure/v1beta1"
-	"github.com/k0sproject/k0smotron/inttest/util"
-
-	"github.com/stretchr/testify/require"
-	"github.com/stretchr/testify/suite"
-	"golang.org/x/crypto/ssh"
-	corev1 "k8s.io/api/core/v1"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/util/yaml"
-	"k8s.io/client-go/kubernetes"
-	"k8s.io/client-go/rest"
-	"k8s.io/client-go/tools/clientcmd"
-)
-
-type RemoteMachineTemplateSuite struct {
-	common.FootlooseSuite
-
-	client           *kubernetes.Clientset
-	restConfig       *rest.Config
-	clusterYamlsPath string
-	privateKey       []byte
-	publicKey        []byte
-}
-
-func (s *RemoteMachineTemplateSuite) SetupSuite() {
-	s.FootlooseSuite.SetupSuite()
-}
-
-func TestRemoteMachineSuite(t *testing.T) {
-	kubeConfigPath := os.Getenv("KUBECONFIG")
-	require.NotEmpty(t, kubeConfigPath, "KUBECONFIG env var must be set and point to kind cluster")
-	// Get kube client from kubeconfig
-	restCfg, err := clientcmd.BuildConfigFromFlags("", kubeConfigPath)
-	require.NoError(t, err)
-	require.NotNil(t, restCfg)
-
-	// Get kube client from kubeconfig
-	kubeClient, err := kubernetes.NewForConfig(restCfg)
-	require.NoError(t, err)
-	require.NotNil(t, kubeClient)
-
-	// Create keypair to use with SSH
-	privateKey, err := rsa.GenerateKey(rand.Reader, 2048)
-	require.NoError(t, err)
-
-	// Convert the private key to PEM format
-	privateKeyBytes := x509.MarshalPKCS1PrivateKey(privateKey)
-	privateKeyPEM := pem.EncodeToMemory(&pem.Block{
-		Type:  "RSA PRIVATE KEY",
-		Bytes: privateKeyBytes,
-	})
-
-	// Extract the public key from the private key
-	publicKey := &privateKey.PublicKey
-
-	// Convert the public key to the OpenSSH format
-	sshPublicKey, err := ssh.NewPublicKey(publicKey)
-	require.NoError(t, err)
-	sshPublicKeyBytes := ssh.MarshalAuthorizedKey(sshPublicKey)
-
-	tmpDir := t.TempDir()
-
-	s := RemoteMachineTemplateSuite{
-		common.FootlooseSuite{
-			ControllerCount:      0,
-			WorkerCount:          0,
-			K0smotronWorkerCount: 1,
-			K0smotronNetworks:    []string{"kind"},
-		},
-		kubeClient,
-		restCfg,
-		tmpDir + "/cluster.yaml",
-		privateKeyPEM,
-		sshPublicKeyBytes,
-	}
-	suite.Run(t, &s)
-}
-
-func (s *RemoteMachineTemplateSuite) TestCAPIRemoteMachine() {
-	ctx := s.Context()
-	// Push public key to worker authorized_keys
-	workerSSH, err := s.SSH(ctx, s.K0smotronNode(0))
-	s.Require().NoError(err)
-	defer workerSSH.Disconnect()
-	s.T().Log("Pushing public key to worker")
-	s.Require().NoError(workerSSH.Exec(s.Context(), "cat >>/root/.ssh/authorized_keys", common.SSHStreams{In: bytes.NewReader(s.publicKey)}))
-
-	s.Require().NoError(err)
-	defer func() {
-		keep := os.Getenv("KEEP_AFTER_TEST")
-		if keep == "true" {
-			return
-		}
-		if keep == "on-failure" && s.T().Failed() {
-			return
-		}
-		s.T().Log("Deleting cluster objects")
-		s.Require().NoError(util.DeleteCluster("remote-test-cluster"))
-	}()
-
-	s.createCluster()
-
-	s.T().Log("cluster objects applied, waiting for cluster to be ready")
-	var localPort int
-	// nolint:staticcheck
-	err = wait.PollImmediateUntilWithContext(ctx, 1*time.Second, func(ctx context.Context) (bool, error) {
-		localPort, _ = getLBPort("TestRemoteMachineSuite-k0smotron0")
-		return localPort > 0, nil
-	})
-	s.Require().NoError(err)
-	s.T().Log("waiting to see admin kubeconfig secret")
-	s.Require().NoError(util.WaitForSecret(ctx, s.client, "remote-test-cluster-kubeconfig", "default"))
-	kmcKC, err := util.GetKMCClientSet(ctx, s.client, "remote-test-cluster", "default", localPort)
-	s.Require().NoError(err)
-
-	s.T().Log("verify the RemoteMachine is at expected state")
-	expectedProviderID := fmt.Sprintf("remote-machine://%s:22", s.getWorkerIP())
-	controlPlaneMachineName := ""
-	// nolint:staticcheck
-	err = wait.PollImmediateUntilWithContext(ctx, 1*time.Second, func(ctx context.Context) (bool, error) {
-		machines, err := util.GetControlPlaneMachinesByKcpName(ctx, "remote-test", "default", s.client)
-		if err != nil {
-			return false, nil
-		}
-
-		if len(machines) != 1 {
-			return false, nil
-		}
-
-		controlPlaneMachineName = machines[0].GetName()
-
-		if machines[0].Labels["my-custom-label"] != "my-custom-value" {
-			return false, nil
-		}
-
-		rm, err := s.getRemoteMachine(controlPlaneMachineName, "default")
-		if err != nil {
-			s.T().Log(err)
-			return false, nil
-		}
-
-		return rm.Status.Ready && expectedProviderID == rm.Spec.ProviderID, nil
-	})
-	s.Require().NoError(err)
-
-	s.T().Log("waiting for node to be ready")
-	s.Require().NoError(common.WaitForNodeReadyStatus(ctx, kmcKC, controlPlaneMachineName, corev1.ConditionTrue))
-
-	err = wait.PollUntilContextCancel(ctx, time.Second, true, func(ctx context.Context) (done bool, err error) {
-		node, err := kmcKC.CoreV1().Nodes().Get(ctx, controlPlaneMachineName, metav1.GetOptions{})
-		if err != nil {
-			return false, err
-		}
-
-		return node.Labels["k0smotron.io/machine-name"] == controlPlaneMachineName && node.Spec.ProviderID == expectedProviderID, nil
-	})
-	s.Require().NoError(err)
-
-	s.T().Log("deleting node from cluster")
-	s.Require().NoError(s.deleteRemoteMachine(controlPlaneMachineName, "default"))
-
-	nodes, err := kmcKC.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
-	s.Require().NoError(err)
-	s.Require().Equal(corev1.ConditionFalse, nodes.Items[0].Status.Conditions[0].Status)
-
-}
-
-func (s *RemoteMachineTemplateSuite) getRemoteMachine(name string, namespace string) (*infra.RemoteMachine, error) {
-	apiPath := fmt.Sprintf("/apis/infrastructure.cluster.x-k8s.io/v1beta1/namespaces/%s/remotemachines/%s", namespace, name)
-	result, err := s.client.RESTClient().Get().AbsPath(apiPath).DoRaw(s.Context())
-	if err != nil {
-		return nil, err
-	}
-	rm := &infra.RemoteMachine{}
-	if err := yaml.Unmarshal(result, rm); err != nil {
-		return nil, err
-	}
-	return rm, nil
-}
-
-func (s *RemoteMachineTemplateSuite) deleteRemoteMachine(name string, namespace string) error {
-	apiPath := fmt.Sprintf("/apis/infrastructure.cluster.x-k8s.io/v1beta1/namespaces/%s/remotemachines/%s", namespace, name)
-	_, err := s.client.RESTClient().Delete().AbsPath(apiPath).DoRaw(s.Context())
-	return err
-}
-
-func (s *RemoteMachineTemplateSuite) createCluster() {
-
-	// Get worker IP
-	workerIP := s.getWorkerIP()
-	s.Require().NotEmpty(workerIP)
-
-	// Get SSH key
-	machines, err := s.InspectMachines([]string{s.K0smotronNode(0)})
-	s.Require().NoError(err)
-	s.Require().NotEmpty(machines)
-
-	// Parse the cluster yaml as template
-	t, err := template.New("cluster").Parse(clusterYaml)
-	s.Require().NoError(err)
-
-	// Execute the template to buffer
-	var clusterYamlBuf bytes.Buffer
-
-	k0sVersion := os.Getenv("K0S_VERSION")
-
-	err = t.Execute(&clusterYamlBuf, struct {
-		Address    string
-		SSHKey     string
-		K0SVersion string
-	}{
-		Address:    workerIP,
-		SSHKey:     base64.StdEncoding.EncodeToString(s.privateKey),
-		K0SVersion: k0sVersion,
-	})
-	s.Require().NoError(err)
-	bytes := clusterYamlBuf.Bytes()
-
-	s.Require().NoError(os.WriteFile(s.clusterYamlsPath, bytes, 0644))
-	out, err := exec.Command("kubectl", "apply", "-f", s.clusterYamlsPath).CombinedOutput()
-	s.Require().NoError(err, "failed to update cluster objects: %s", string(out))
-}
-
-func getLBPort(name string) (int, error) {
-	b, err := exec.Command("docker", "inspect", name, "--format", "{{json .NetworkSettings.Ports}}").Output()
-	if err != nil {
-		return 0, fmt.Errorf("failed to get inspect info from container %s: %w", name, err)
-	}
-
-	var ports map[string][]map[string]string
-	err = json.Unmarshal(b, &ports)
-	if err != nil {
-		return 0, fmt.Errorf("failed to unmarshal inspect info from container %s: %w", name, err)
-	}
-
-	return strconv.Atoi(ports["6443/tcp"][0]["HostPort"])
-}
-
-func (s *RemoteMachineTemplateSuite) getWorkerIP() string {
-	nodeName := s.K0smotronNode(0)
-	ssh, err := s.SSH(s.Context(), nodeName)
-	s.Require().NoError(err)
-	defer ssh.Disconnect()
-
-	ipAddress, err := ssh.ExecWithOutput(s.Context(), "hostname -i")
-	s.Require().NoError(err)
-	return ipAddress
-}
-
-var clusterYaml = `
-apiVersion: controlplane.cluster.x-k8s.io/v1beta1
-kind: K0sControlPlane
-metadata:
-  name: remote-test
-spec:
-  replicas: 1
-  version: {{ .K0SVersion }}+k0s.0
-  k0sConfigSpec:
-    k0s:
-      apiVersion: k0s.k0sproject.io/v1beta1
-      kind: ClusterConfig
-      metadata:
-        name: k0s
-      spec:
-        api:
-          extraArgs:
-            anonymous-auth: "true"
-        telemetry:
-          enabled: false
-    args:
-      - --enable-worker
-      - --no-taints
-  machineTemplate:
-    infrastructureRef:
-      apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
-      kind: RemoteMachineTemplate
-      name: remote-test-cp-template
-      namespace: default
----
-apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
-kind: RemoteCluster
-metadata:
-  name: remote-test
-  namespace: default
-spec:
----
-apiVersion: cluster.x-k8s.io/v1beta1
-kind: Cluster
-metadata:
-  name: remote-test-cluster
-  namespace: default
-spec:
-  clusterNetwork:
-    pods:
-      cidrBlocks:
-      - 192.168.0.0/16
-    serviceDomain: cluster.local
-    services:
-      cidrBlocks:
-      - 10.128.0.0/12
-  controlPlaneEndpoint:
-    host: {{ .Address }}
-    port: 6443
-  controlPlaneRef:
-    apiVersion: controlplane.cluster.x-k8s.io/v1beta1
-    kind: K0sControlPlane
-    name: remote-test
-  infrastructureRef:
-    apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
-    kind: RemoteCluster
-    name: remote-test
----
-apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
-kind: RemoteMachineTemplate
-metadata:
-  name: remote-test-cp-template
-  namespace: default
-spec:
-  template:
-    spec:
-      pool: default
----
-apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
-kind: PooledRemoteMachine
-metadata:
-  name: remote-test-0
-  namespace: default
-  labels:
-    my-custom-label: my-custom-value
-spec:
-  pool: default
-  machine:
-    address: {{ .Address }}
-    port: 22
-    user: root
-    commandsAsScript: true
-    sshKeyRef:
-      name: footloose-key
----
-apiVersion: v1
-kind: Secret
-metadata:
-  name: footloose-key
-  namespace: default
-data:
-  value: {{ .SSHKey }}
-type: Opaque
-`
diff --git a/inttest/capi-remote-machine/capi_remote_machine_test.go b/inttest/capi-remote-machine/capi_remote_machine_test.go
deleted file mode 100644
index aca715994..000000000
--- a/inttest/capi-remote-machine/capi_remote_machine_test.go
+++ /dev/null
@@ -1,352 +0,0 @@
-/*
-Copyright 2023.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package capiremotemachine
-
-import (
-	"bytes"
-	"context"
-	"crypto/rand"
-	"crypto/rsa"
-	"crypto/x509"
-	"encoding/base64"
-	"encoding/pem"
-	"fmt"
-	"os"
-	"testing"
-	"text/template"
-	"time"
-
-	"github.com/k0sproject/k0s/inttest/common"
-	infra "github.com/k0sproject/k0smotron/api/infrastructure/v1beta1"
-	"github.com/k0sproject/k0smotron/inttest/util"
-
-	"github.com/stretchr/testify/require"
-	"github.com/stretchr/testify/suite"
-	"golang.org/x/crypto/ssh"
-	corev1 "k8s.io/api/core/v1"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/util/wait"
-	"k8s.io/apimachinery/pkg/util/yaml"
-	"k8s.io/client-go/kubernetes"
-	"k8s.io/client-go/rest"
-	"k8s.io/client-go/tools/clientcmd"
-)
-
-type RemoteMachineSuite struct {
-	common.FootlooseSuite
-
-	client     *kubernetes.Clientset
-	restConfig *rest.Config
-	privateKey []byte
-	publicKey  []byte
-}
-
-func (s *RemoteMachineSuite) SetupSuite() {
-	s.FootlooseSuite.SetupSuite()
-}
-
-func TestRemoteMachineSuite(t *testing.T) {
-	kubeConfigPath := os.Getenv("KUBECONFIG")
-	require.NotEmpty(t, kubeConfigPath, "KUBECONFIG env var must be set and point to kind cluster")
-	// Get kube client from kubeconfig
-	restCfg, err := clientcmd.BuildConfigFromFlags("", kubeConfigPath)
-	require.NoError(t, err)
-	require.NotNil(t, restCfg)
-
-	// Get kube client from kubeconfig
-	kubeClient, err := kubernetes.NewForConfig(restCfg)
-	require.NoError(t, err)
-	require.NotNil(t, kubeClient)
-
-	// Create keypair to use with SSH
-	privateKey, err := rsa.GenerateKey(rand.Reader, 2048)
-	require.NoError(t, err)
-
-	// Convert the private key to PEM format
-	privateKeyBytes := x509.MarshalPKCS1PrivateKey(privateKey)
-	privateKeyPEM := pem.EncodeToMemory(&pem.Block{
-		Type:  "RSA PRIVATE KEY",
-		Bytes: privateKeyBytes,
-	})
-
-	// Extract the public key from the private key
-	publicKey := &privateKey.PublicKey
-
-	// Convert the public key to the OpenSSH format
-	sshPublicKey, err := ssh.NewPublicKey(publicKey)
-	require.NoError(t, err)
-	sshPublicKeyBytes := ssh.MarshalAuthorizedKey(sshPublicKey)
-
-	s := RemoteMachineSuite{
-		common.FootlooseSuite{
-			ControllerCount:      0,
-			WorkerCount:          0,
-			K0smotronWorkerCount: 1,
-			K0smotronNetworks:    []string{"kind"},
-		},
-		kubeClient,
-		restCfg,
-		privateKeyPEM,
-		sshPublicKeyBytes,
-	}
-	suite.Run(t, &s)
-}
-
-func (s *RemoteMachineSuite) TestCAPIRemoteMachine() {
-	ctx := s.Context()
-	// Push public key to worker authorized_keys
-	workerSSH, err := s.SSH(ctx, s.K0smotronNode(0))
-	s.Require().NoError(err)
-	defer workerSSH.Disconnect()
-	s.T().Log("Pushing public key to worker")
-	s.Require().NoError(workerSSH.Exec(s.Context(), "cat >>/root/.ssh/authorized_keys", common.SSHStreams{In: bytes.NewReader(s.publicKey)}))
-
-	s.Require().NoError(err)
-	defer func() {
-		keep := os.Getenv("KEEP_AFTER_TEST")
-		if keep == "true" {
-			return
-		}
-		if keep == "on-failure" && s.T().Failed() {
-			return
-		}
-		s.T().Log("Deleting cluster objects")
-		s.deleteCluster()
-	}()
-
-	s.createCluster()
-
-	s.T().Log("cluster objects applied, waiting for cluster to be ready")
-
-	// Wait for the cluster to be ready
-	// Wait to see the CP pods ready
-	s.Require().NoError(common.WaitForStatefulSet(ctx, s.client, "kmc-remote-test", "default"))
-
-	s.T().Log("Starting portforward")
-	fw, err := util.GetPortForwarder(s.restConfig, "kmc-remote-test-0", "default", 30443)
-	s.Require().NoError(err)
-
-	go fw.Start(s.Require().NoError)
-	defer fw.Close()
-
-	<-fw.ReadyChan
-
-	localPort, err := fw.LocalPort()
-	s.Require().NoError(err)
-	s.T().Log("waiting to see admin kubeconfig secret")
-	s.Require().NoError(util.WaitForSecret(ctx, s.client, "remote-test-kubeconfig", "default"))
-	kmcKC, err := util.GetKMCClientSet(ctx, s.client, "remote-test", "default", localPort)
-	s.Require().NoError(err)
-
-	s.T().Log("waiting for node to be ready")
-	s.Require().NoError(common.WaitForNodeReadyStatus(ctx, kmcKC, "remote-test-0", corev1.ConditionTrue))
-	// Verify the RemoteMachine is at expected state
-	rm, err := s.getRemoteMachine("remote-test-0", "default")
-	s.Require().NoError(err)
-	s.Require().True(rm.Status.Ready)
-	expectedProviderID := fmt.Sprintf("remote-machine://%s:22", s.getWorkerIP())
-	s.Require().Equal(expectedProviderID, rm.Spec.ProviderID)
-
-	err = wait.PollUntilContextCancel(ctx, time.Second, true, func(ctx context.Context) (done bool, err error) {
-		node, err := kmcKC.CoreV1().Nodes().Get(ctx, "remote-test-0", metav1.GetOptions{})
-		if err != nil {
-			return false, err
-		}
-
-		return node.Labels["k0smotron.io/machine-name"] == "remote-test-0" && node.Spec.ProviderID == expectedProviderID, nil
-	})
-	s.Require().NoError(err)
-
-	s.T().Log("deleting node from cluster")
-	s.Require().NoError(s.deleteRemoteMachine("remote-test-0", "default"))
-
-	nodes, err := kmcKC.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
-	s.Require().NoError(err)
-	s.Require().Equal(corev1.ConditionFalse, nodes.Items[0].Status.Conditions[0].Status)
-
-}
-
-func (s *RemoteMachineSuite) getRemoteMachine(name string, namespace string) (*infra.RemoteMachine, error) {
-	apiPath := fmt.Sprintf("/apis/infrastructure.cluster.x-k8s.io/v1beta1/namespaces/%s/remotemachines/%s", namespace, name)
-	result, err := s.client.RESTClient().Get().AbsPath(apiPath).DoRaw(s.Context())
-	if err != nil {
-		return nil, err
-	}
-	rm := &infra.RemoteMachine{}
-	if err := yaml.Unmarshal(result, rm); err != nil {
-		return nil, err
-	}
-	return rm, nil
-}
-
-func (s *RemoteMachineSuite) deleteRemoteMachine(name string, namespace string) error {
-	apiPath := fmt.Sprintf("/apis/infrastructure.cluster.x-k8s.io/v1beta1/namespaces/%s/remotemachines/%s", namespace, name)
-	_, err := s.client.RESTClient().Delete().AbsPath(apiPath).DoRaw(s.Context())
-	return err
-}
-
-func (s *RemoteMachineSuite) deleteCluster() {
-	response := s.client.RESTClient().Delete().AbsPath("/apis/cluster.x-k8s.io/v1beta1/namespaces/default/clusters/remote-test").Do(s.Context())
-	s.Require().NoError(response.Error())
-	if err := s.client.CoreV1().Secrets("default").Delete(s.Context(), "footloose-key", metav1.DeleteOptions{}); err != nil {
-		s.T().Logf("failed to delete footloose SSH key secret: %s", err.Error())
-	}
-}
-
-func (s *RemoteMachineSuite) createCluster() {
-
-	// Get worker IP
-	workerIP := s.getWorkerIP()
-	s.Require().NotEmpty(workerIP)
-
-	// Get SSH key
-	machines, err := s.InspectMachines([]string{s.K0smotronNode(0)})
-	s.Require().NoError(err)
-	s.Require().NotEmpty(machines)
-
-	// Parse the cluster yaml as template
-	t, err := template.New("cluster").Parse(clusterYaml)
-	s.Require().NoError(err)
-
-	k0sVersion := os.Getenv("K0S_VERSION")
-
-	// Execute the template to buffer
-	var clusterYamlBuf bytes.Buffer
-
-	err = t.Execute(&clusterYamlBuf, struct {
-		Address    string
-		SSHKey     string
-		K0SVersion string
-	}{
-		Address:    workerIP,
-		SSHKey:     base64.StdEncoding.EncodeToString(s.privateKey),
-		K0SVersion: k0sVersion,
-	})
-	s.Require().NoError(err)
-	bytes := clusterYamlBuf.Bytes()
-	// s.T().Logf("cluster yaml: %s", string(bytes))
-	resources, err := util.ParseManifests(bytes)
-	s.Require().NoError(err)
-	s.Require().NotEmpty(resources)
-
-	dynClient, err := util.GetDynamicClient(s.restConfig)
-	s.Require().NoError(err)
-	s.Require().NotNil(dynClient)
-
-	err = util.CreateResources(s.Context(), resources, s.client, dynClient)
-	s.Require().NoError(err)
-}
-
-func (s *RemoteMachineSuite) getWorkerIP() string {
-	nodeName := s.K0smotronNode(0)
-	ssh, err := s.SSH(s.Context(), nodeName)
-	s.Require().NoError(err)
-	defer ssh.Disconnect()
-
-	ipAddress, err := ssh.ExecWithOutput(s.Context(), "hostname -i")
-	s.Require().NoError(err)
-	return ipAddress
-}
-
-var clusterYaml = `apiVersion: cluster.x-k8s.io/v1beta1
-kind: Cluster
-metadata:
-  name: remote-test
-  namespace: default
-spec:
-  clusterNetwork:
-    pods:
-      cidrBlocks:
-      - 192.168.0.0/16
-    serviceDomain: cluster.local
-    services:
-      cidrBlocks:
-      - 10.128.0.0/12
-  controlPlaneRef:
-    apiVersion: controlplane.cluster.x-k8s.io/v1beta1
-    kind: K0smotronControlPlane
-    name: remote-test
-  infrastructureRef:
-    apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
-    kind: RemoteCluster
-    name: remote-test
----
-apiVersion: controlplane.cluster.x-k8s.io/v1beta1
-kind: K0smotronControlPlane
-metadata:
-  name: remote-test
-  namespace: default
-spec:
-  version: {{ .K0SVersion }}-k0s.0
-  persistence:
-    type: emptyDir
-  service:
-    type: NodePort
----
-apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
-kind: RemoteCluster
-metadata:
-  name: remote-test
-  namespace: default
-spec:
----
-apiVersion: cluster.x-k8s.io/v1beta1
-kind: Machine
-metadata:
-  name: remote-test-0
-  namespace: default
-spec:
-  clusterName: remote-test
-  bootstrap:
-    configRef:
-      apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
-      kind: K0sWorkerConfig
-      name: remote-test-0
-  infrastructureRef:
-    apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
-    kind: RemoteMachine
-    name: remote-test-0
----
-apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
-kind: K0sWorkerConfig
-metadata:
-  name: remote-test-0
-  namespace: default
-spec:
-  version: {{ .K0SVersion }}+k0s.0
----
-apiVersion: infrastructure.cluster.x-k8s.io/v1beta1
-kind: RemoteMachine
-metadata:
-  name: remote-test-0
-  namespace: default
-spec:
-  address: {{ .Address }}
-  port: 22
-  user: root
-  sshKeyRef:
-    name: footloose-key
----
-apiVersion: v1
-kind: Secret
-metadata:
-  name: footloose-key
-  namespace: default
-data:
-  value: {{ .SSHKey }}
-type: Opaque
-`