diff --git a/.github/workflows/dependabot.yml b/.github/workflows/dependabot.yml
deleted file mode 100644
index 540aa491e694..000000000000
--- a/.github/workflows/dependabot.yml
+++ /dev/null
@@ -1,43 +0,0 @@
-name: dependabot
-
-on:
- pull_request:
- branches:
- - dependabot/**
- push:
- branches:
- - dependabot/**
- workflow_dispatch:
-
-jobs:
- build:
- name: Build
- runs-on: ubuntu-latest
- steps:
- - name: Set up Go 1.x
- uses: actions/setup-go@v2
- with:
- go-version: '1.17'
- id: go
- - name: Check out code into the Go module directory
- uses: actions/checkout@v2
- - uses: actions/cache@v2
- name: Restore go cache
- with:
- path: |
- ~/.cache/go-build
- ~/go/pkg/mod
- key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
- restore-keys: |
- ${{ runner.os }}-go-
- - name: Update all modules
- run: make modules
- - name: Update generated code
- run: make generate
- - uses: EndBug/add-and-commit@v7
- name: Commit changes
- with:
- author_name: dependabot[bot]
- author_email: 49699333+dependabot[bot]@users.noreply.github.com
- default_author: github_actor
- message: 'Update generated code'
diff --git a/.github/workflows/golangci-lint.yml b/.github/workflows/golangci-lint.yml
deleted file mode 100644
index 7288be9a0d69..000000000000
--- a/.github/workflows/golangci-lint.yml
+++ /dev/null
@@ -1,21 +0,0 @@
-name: golangci-lint
-on:
- pull_request:
- types: [opened, edited, synchronize, reopened]
-jobs:
- golangci:
- name: lint
- runs-on: ubuntu-latest
- strategy:
- matrix:
- working-directory:
- - ""
- - test
- - hack/tools
- steps:
- - uses: actions/checkout@v2
- - name: golangci-lint
- uses: golangci/golangci-lint-action@v2
- with:
- version: v1.43.0
- working-directory: ${{matrix.working-directory}}
diff --git a/.github/workflows/lint-docs.yaml b/.github/workflows/lint-docs.yaml
deleted file mode 100644
index 8b46d3861a1f..000000000000
--- a/.github/workflows/lint-docs.yaml
+++ /dev/null
@@ -1,17 +0,0 @@
-name: Check Markdown links
-
-on:
- pull_request:
- types: [opened, edited, synchronize, reopened]
- paths:
- - '**.md'
-
-jobs:
- markdown-link-check:
- name: Broken Links
- runs-on: ubuntu-latest
- steps:
- - uses: actions/checkout@master
- - uses: gaurav-nelson/github-action-markdown-link-check@v1
- with:
- config-file: .markdownlinkcheck.json
\ No newline at end of file
diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml
deleted file mode 100644
index f3e34538f9ff..000000000000
--- a/.github/workflows/release.yml
+++ /dev/null
@@ -1,35 +0,0 @@
-on:
- push:
- # Sequence of patterns matched against refs/tags
- tags:
- - 'v*' # Push events to matching v*, i.e. v1.0, v20.15.10
-
-name: release
-
-jobs:
- build:
- name: create draft release
- runs-on: ubuntu-latest
- steps:
- - name: Set env
- run: echo "RELEASE_TAG=${GITHUB_REF:10}" >> $GITHUB_ENV
- - name: checkout code
- uses: actions/checkout@v2
- with:
- fetch-depth: 0
- - name: Install go
- uses: actions/setup-go@v2
- with:
- go-version: '^1.17'
- - name: generate release artifacts
- run: |
- make release
- - name: generate release notes
- run: |
- make release-notes
- - name: Release
- uses: softprops/action-gh-release@v1
- with:
- draft: true
- files: out/*
- body_path: _releasenotes/${{ env.RELEASE_TAG }}.md
diff --git a/.github/workflows/update-homebrew-formula-on-release.yml b/.github/workflows/update-homebrew-formula-on-release.yml
deleted file mode 100644
index 908451053737..000000000000
--- a/.github/workflows/update-homebrew-formula-on-release.yml
+++ /dev/null
@@ -1,17 +0,0 @@
-name: Update Homebrew Formula On Release
-
-on:
- release:
- types: [released]
-
-jobs:
- update-homebrew-formula-on-release:
- runs-on: macos-latest
- steps:
- - name: Update Homebrew formula
- uses: dawidd6/action-homebrew-bump-formula@v3
- with:
- token: ${{secrets.HOMEBREW_UPDATE_TOKEN}}
- formula: clusterctl
- tag: ${{github.ref}}
- revision: ${{github.sha}}
diff --git a/.github/workflows/verify.yml b/.github/workflows/verify.yml
deleted file mode 100644
index 3e57ce1d0276..000000000000
--- a/.github/workflows/verify.yml
+++ /dev/null
@@ -1,14 +0,0 @@
-on:
- pull_request_target:
- types: [opened, edited, synchronize, reopened]
-
-jobs:
- verify:
- runs-on: ubuntu-latest
- name: verify PR contents
- steps:
- - name: Verifier action
- id: verifier
- uses: kubernetes-sigs/kubebuilder-release-tools@v0.1
- with:
- github_token: ${{ secrets.GITHUB_TOKEN }}
diff --git a/.markdownlinkcheck.json b/.markdownlinkcheck.json
index 0d47508f3ee7..78da0b7b6e15 100644
--- a/.markdownlinkcheck.json
+++ b/.markdownlinkcheck.json
@@ -1,10 +1,15 @@
{
- "ignorePatterns": [
- {
+ "ignorePatterns": [{
"pattern": "^http://localhost"
+ }],
+ "httpHeaders": [{
+ "comment": "Workaround as suggested here: https://github.com/tcort/markdown-link-check/issues/201",
+ "urls": ["https://docs.github.com/"],
+ "headers": {
+ "Accept-Encoding": "zstd, br, gzip, deflate"
}
-],
- "timeout": "5s",
+ }],
+ "timeout": "10s",
"retryOn429": true,
"retryCount": 5,
"fallbackRetryDelay": "30s",
diff --git a/Makefile b/Makefile
index 63a7a96d95e1..1711e6e04c46 100644
--- a/Makefile
+++ b/Makefile
@@ -757,4 +757,4 @@ $(KUSTOMIZE): # Download kustomize using hack script into tools folder.
$(GOLANGCI_LINT): .github/workflows/golangci-lint.yml # Download golangci-lint using hack script into tools folder.
hack/ensure-golangci-lint.sh \
-b $(TOOLS_DIR)/$(BIN_DIR) \
- $(shell cat .github/workflows/golangci-lint.yml | grep version | sed 's/.*version: //')
+ $(shell cat .github/workflows/golangci-lint.yml | grep [[:space:]]version | sed 's/.*version: //')
diff --git a/OWNERS_ALIASES b/OWNERS_ALIASES
index 1d786f01c081..4c3df3cb94cb 100644
--- a/OWNERS_ALIASES
+++ b/OWNERS_ALIASES
@@ -22,6 +22,7 @@ aliases:
- CecileRobertMichon
- enxebre
- fabriziopandini
+ - sbueringer
- vincepri
# folks who can review and LGTM any PRs in the repo
@@ -45,6 +46,7 @@ aliases:
cluster-api-bootstrap-provider-kubeadm-maintainers:
cluster-api-bootstrap-provider-kubeadm-reviewers:
+ - killianmuldoon
# -----------------------------------------------------------
# OWNER_ALIASES for bootstrap/kubeadm/internal/ignition
@@ -69,6 +71,7 @@ aliases:
cluster-api-clusterctl-maintainers:
cluster-api-clusterctl-reviewers:
+ - ykakarap
# -----------------------------------------------------------
# OWNER_ALIASES for test
@@ -76,7 +79,6 @@ aliases:
cluster-api-test-reviewers:
cluster-api-test-maintainers:
- - sbueringer
# -----------------------------------------------------------
# OWNER_ALIASES for test/framework
@@ -97,4 +99,5 @@ aliases:
# -----------------------------------------------------------
cluster-api-docs-reviewers:
+ - killianmuldoon
cluster-api-docs-maintainers:
diff --git a/bootstrap/kubeadm/internal/controllers/kubeadmconfig_controller.go b/bootstrap/kubeadm/internal/controllers/kubeadmconfig_controller.go
index a7be733a6ecb..1452fcb3f3bb 100644
--- a/bootstrap/kubeadm/internal/controllers/kubeadmconfig_controller.go
+++ b/bootstrap/kubeadm/internal/controllers/kubeadmconfig_controller.go
@@ -246,9 +246,10 @@ func (r *KubeadmConfigReconciler) Reconcile(ctx context.Context, req ctrl.Reques
// Status is ready means a config has been generated.
case config.Status.Ready:
if config.Spec.JoinConfiguration != nil && config.Spec.JoinConfiguration.Discovery.BootstrapToken != nil {
- if !configOwner.IsInfrastructureReady() {
- // If the BootstrapToken has been generated for a join and the infrastructure is not ready.
- // This indicates the token in the join config has not been consumed and it may need a refresh.
+ if !configOwner.HasNodeRefs() {
+ // If the BootstrapToken has been generated for a join but the config owner has no nodeRefs,
+ // this indicates that the node has not yet joined and the token in the join config has not
+ // been consumed and it may need a refresh.
return r.refreshBootstrapToken(ctx, config, cluster)
}
if configOwner.IsMachinePool() {
diff --git a/bootstrap/kubeadm/internal/controllers/kubeadmconfig_controller_test.go b/bootstrap/kubeadm/internal/controllers/kubeadmconfig_controller_test.go
index d01a26fbc273..683832681359 100644
--- a/bootstrap/kubeadm/internal/controllers/kubeadmconfig_controller_test.go
+++ b/bootstrap/kubeadm/internal/controllers/kubeadmconfig_controller_test.go
@@ -944,7 +944,7 @@ func TestBootstrapTokenTTLExtension(t *testing.T) {
tokenExpires[i] = item.Data[bootstrapapi.BootstrapTokenExpirationKey]
}
- // ...until the infrastructure is marked "ready"
+	// ...the infrastructure is marked "ready", but the token should still be refreshed...
patchHelper, err := patch.NewHelper(workerMachine, myclient)
g.Expect(err).ShouldNot(HaveOccurred())
workerMachine.Status.InfrastructureReady = true
@@ -957,6 +957,56 @@ func TestBootstrapTokenTTLExtension(t *testing.T) {
<-time.After(1 * time.Second)
+ for _, req := range []ctrl.Request{
+ {
+ NamespacedName: client.ObjectKey{
+ Namespace: metav1.NamespaceDefault,
+ Name: "worker-join-cfg",
+ },
+ },
+ {
+ NamespacedName: client.ObjectKey{
+ Namespace: metav1.NamespaceDefault,
+ Name: "control-plane-join-cfg",
+ },
+ },
+ } {
+ result, err := k.Reconcile(ctx, req)
+ g.Expect(err).NotTo(HaveOccurred())
+ g.Expect(result.RequeueAfter).NotTo(BeNumerically(">=", k.TokenTTL))
+ }
+
+ l = &corev1.SecretList{}
+ err = myclient.List(ctx, l, client.ListOption(client.InNamespace(metav1.NamespaceSystem)))
+ g.Expect(err).NotTo(HaveOccurred())
+ g.Expect(len(l.Items)).To(Equal(2))
+
+ for i, item := range l.Items {
+ g.Expect(bytes.Equal(tokenExpires[i], item.Data[bootstrapapi.BootstrapTokenExpirationKey])).To(BeFalse())
+ tokenExpires[i] = item.Data[bootstrapapi.BootstrapTokenExpirationKey]
+ }
+
+ // ...until the Nodes have actually joined the cluster and we get a nodeRef
+ patchHelper, err = patch.NewHelper(workerMachine, myclient)
+ g.Expect(err).ShouldNot(HaveOccurred())
+ workerMachine.Status.NodeRef = &corev1.ObjectReference{
+ APIVersion: "v1",
+ Kind: "Node",
+ Name: "worker-node",
+ }
+ g.Expect(patchHelper.Patch(ctx, workerMachine)).To(Succeed())
+
+ patchHelper, err = patch.NewHelper(controlPlaneJoinMachine, myclient)
+ g.Expect(err).ShouldNot(HaveOccurred())
+ controlPlaneJoinMachine.Status.NodeRef = &corev1.ObjectReference{
+ APIVersion: "v1",
+ Kind: "Node",
+ Name: "control-plane-node",
+ }
+ g.Expect(patchHelper.Patch(ctx, controlPlaneJoinMachine)).To(Succeed())
+
+ <-time.After(1 * time.Second)
+
for _, req := range []ctrl.Request{
{
NamespacedName: client.ObjectKey{
@@ -1068,7 +1118,7 @@ func TestBootstrapTokenRotationMachinePool(t *testing.T) {
tokenExpires[i] = item.Data[bootstrapapi.BootstrapTokenExpirationKey]
}
- // ...until the infrastructure is marked "ready"
+	// ...the infrastructure is marked "ready", but the token should still be refreshed...
patchHelper, err := patch.NewHelper(workerMachinePool, myclient)
g.Expect(err).ShouldNot(HaveOccurred())
workerMachinePool.Status.InfrastructureReady = true
@@ -1076,6 +1126,38 @@ func TestBootstrapTokenRotationMachinePool(t *testing.T) {
<-time.After(1 * time.Second)
+ request = ctrl.Request{
+ NamespacedName: client.ObjectKey{
+ Namespace: metav1.NamespaceDefault,
+ Name: "workerpool-join-cfg",
+ },
+ }
+ result, err = k.Reconcile(ctx, request)
+ g.Expect(err).NotTo(HaveOccurred())
+ g.Expect(result.RequeueAfter).NotTo(BeNumerically(">=", k.TokenTTL))
+
+ l = &corev1.SecretList{}
+ err = myclient.List(ctx, l, client.ListOption(client.InNamespace(metav1.NamespaceSystem)))
+ g.Expect(err).NotTo(HaveOccurred())
+ g.Expect(len(l.Items)).To(Equal(1))
+
+ for i, item := range l.Items {
+ g.Expect(bytes.Equal(tokenExpires[i], item.Data[bootstrapapi.BootstrapTokenExpirationKey])).To(BeFalse())
+ tokenExpires[i] = item.Data[bootstrapapi.BootstrapTokenExpirationKey]
+ }
+
+ // ...until all nodes have joined
+ workerMachinePool.Status.NodeRefs = []corev1.ObjectReference{
+ {
+ Kind: "Node",
+ Namespace: metav1.NamespaceDefault,
+ Name: "node-0",
+ },
+ }
+ g.Expect(patchHelper.Patch(ctx, workerMachinePool, patch.WithStatusObservedGeneration{})).To(Succeed())
+
+ <-time.After(1 * time.Second)
+
request = ctrl.Request{
NamespacedName: client.ObjectKey{
Namespace: metav1.NamespaceDefault,
diff --git a/bootstrap/util/configowner.go b/bootstrap/util/configowner.go
index 82feaab61b0f..1d42e4c7e7c6 100644
--- a/bootstrap/util/configowner.go
+++ b/bootstrap/util/configowner.go
@@ -47,6 +47,32 @@ func (co ConfigOwner) IsInfrastructureReady() bool {
return infrastructureReady
}
+// HasNodeRefs checks if the config owner has nodeRefs. For a Machine this means
+// that it has a nodeRef. For a MachinePool it means that it has as many nodeRefs
+// as there are replicas.
+func (co ConfigOwner) HasNodeRefs() bool {
+ if co.IsMachinePool() {
+ numExpectedNodes, found, err := unstructured.NestedInt64(co.Object, "spec", "replicas")
+ if err != nil {
+ return false
+ }
+ // replicas default to 1 so this is what we should use if nothing is specified
+ if !found {
+ numExpectedNodes = 1
+ }
+ nodeRefs, _, err := unstructured.NestedSlice(co.Object, "status", "nodeRefs")
+ if err != nil {
+ return false
+ }
+ return len(nodeRefs) == int(numExpectedNodes)
+ }
+ nodeRef, _, err := unstructured.NestedMap(co.Object, "status", "nodeRef")
+ if err != nil {
+ return false
+ }
+ return len(nodeRef) > 0
+}
+
// ClusterName extracts spec.clusterName from the config owner.
func (co ConfigOwner) ClusterName() string {
clusterName, _, err := unstructured.NestedString(co.Object, "spec", "clusterName")
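A minimal sketch of the HasNodeRefs semantics above, assuming the ConfigOwner wrapper from bootstrap/util and a hand-built unstructured Machine (illustration only, not an addition to the files in this diff):

    machine := &unstructured.Unstructured{Object: map[string]interface{}{
        "kind": "Machine",
        "status": map[string]interface{}{
            "nodeRef": map[string]interface{}{"kind": "Node", "name": "node-0"},
        },
    }}
    co := ConfigOwner{machine}
    fmt.Println(co.HasNodeRefs()) // true: the Machine has a nodeRef
    // For a MachinePool, HasNodeRefs instead compares len(status.nodeRefs)
    // against spec.replicas (defaulting replicas to 1 when unset).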
diff --git a/bootstrap/util/configowner_test.go b/bootstrap/util/configowner_test.go
index 4ca61d73c867..cb4d359a8a98 100644
--- a/bootstrap/util/configowner_test.go
+++ b/bootstrap/util/configowner_test.go
@@ -20,7 +20,10 @@ import (
"testing"
. "github.com/onsi/gomega"
+ corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+ "k8s.io/apimachinery/pkg/runtime"
"k8s.io/utils/pointer"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
@@ -163,3 +166,199 @@ func TestGetConfigOwner(t *testing.T) {
g.Expect(configOwner).To(BeNil())
})
}
+
+func TestHasNodeRefs(t *testing.T) {
+ t.Run("should return false if there is no nodeRef", func(t *testing.T) {
+ g := NewWithT(t)
+ machine := &clusterv1.Machine{
+ TypeMeta: metav1.TypeMeta{
+ Kind: "Machine",
+ },
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "machine-name",
+ Namespace: metav1.NamespaceDefault,
+ },
+ }
+ content, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&machine)
+ if err != nil {
+ g.Fail(err.Error())
+ }
+ unstructuredOwner := unstructured.Unstructured{}
+ unstructuredOwner.SetUnstructuredContent(content)
+ co := ConfigOwner{&unstructuredOwner}
+ result := co.HasNodeRefs()
+ g.Expect(result).To(Equal(false))
+ })
+ t.Run("should return true if there is a nodeRef for Machine", func(t *testing.T) {
+ g := NewWithT(t)
+ machine := &clusterv1.Machine{
+ TypeMeta: metav1.TypeMeta{
+ Kind: "Machine",
+ },
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "machine-name",
+ Namespace: metav1.NamespaceDefault,
+ },
+ Status: clusterv1.MachineStatus{
+ InfrastructureReady: true,
+ NodeRef: &corev1.ObjectReference{
+ Kind: "Node",
+ Namespace: metav1.NamespaceDefault,
+ Name: "node-0",
+ },
+ },
+ }
+ content, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&machine)
+ if err != nil {
+ g.Fail(err.Error())
+ }
+ unstructuredOwner := unstructured.Unstructured{}
+ unstructuredOwner.SetUnstructuredContent(content)
+ co := ConfigOwner{&unstructuredOwner}
+
+ result := co.HasNodeRefs()
+ g.Expect(result).To(Equal(true))
+ })
+ t.Run("should return false if nodes are missing from MachinePool", func(t *testing.T) {
+ g := NewWithT(t)
+ machinePools := []expv1.MachinePool{
+ {
+ // No replicas specified (default is 1). No nodeRefs either.
+ TypeMeta: metav1.TypeMeta{
+ Kind: "MachinePool",
+ },
+ ObjectMeta: metav1.ObjectMeta{
+ Namespace: metav1.NamespaceDefault,
+ Name: "machine-pool-name",
+ },
+ },
+ {
+ // 1 replica but no nodeRefs
+ TypeMeta: metav1.TypeMeta{
+ Kind: "MachinePool",
+ },
+ ObjectMeta: metav1.ObjectMeta{
+ Namespace: metav1.NamespaceDefault,
+ Name: "machine-pool-name",
+ },
+ Spec: expv1.MachinePoolSpec{
+ Replicas: pointer.Int32(1),
+ },
+ },
+ {
+ // 2 replicas but only 1 nodeRef
+ TypeMeta: metav1.TypeMeta{
+ Kind: "MachinePool",
+ },
+ ObjectMeta: metav1.ObjectMeta{
+ Namespace: metav1.NamespaceDefault,
+ Name: "machine-pool-name",
+ },
+ Spec: expv1.MachinePoolSpec{
+ Replicas: pointer.Int32(2),
+ },
+ Status: expv1.MachinePoolStatus{
+ NodeRefs: []corev1.ObjectReference{
+ {
+ Kind: "Node",
+ Namespace: metav1.NamespaceDefault,
+ Name: "node-0",
+ },
+ },
+ },
+ },
+ }
+
+ for _, machinePool := range machinePools {
+ mp := machinePool
+ content, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&mp)
+ if err != nil {
+ g.Fail(err.Error())
+ }
+ unstructuredOwner := unstructured.Unstructured{}
+ unstructuredOwner.SetUnstructuredContent(content)
+ co := ConfigOwner{&unstructuredOwner}
+
+ result := co.HasNodeRefs()
+ g.Expect(result).To(Equal(false))
+ }
+ })
+ t.Run("should return true if MachinePool has nodeRefs for all replicas", func(t *testing.T) {
+ g := NewWithT(t)
+ machinePools := []expv1.MachinePool{
+ {
+ // 1 replica (default) and 1 nodeRef
+ TypeMeta: metav1.TypeMeta{
+ Kind: "MachinePool",
+ },
+ ObjectMeta: metav1.ObjectMeta{
+ Namespace: metav1.NamespaceDefault,
+ Name: "machine-pool-name",
+ },
+ Status: expv1.MachinePoolStatus{
+ NodeRefs: []corev1.ObjectReference{
+ {
+ Kind: "Node",
+ Namespace: metav1.NamespaceDefault,
+ Name: "node-0",
+ },
+ },
+ },
+ },
+ {
+ // 2 replicas and nodeRefs
+ TypeMeta: metav1.TypeMeta{
+ Kind: "MachinePool",
+ },
+ ObjectMeta: metav1.ObjectMeta{
+ Namespace: metav1.NamespaceDefault,
+ Name: "machine-pool-name",
+ },
+ Spec: expv1.MachinePoolSpec{
+ Replicas: pointer.Int32(2),
+ },
+ Status: expv1.MachinePoolStatus{
+ NodeRefs: []corev1.ObjectReference{
+ {
+ Kind: "Node",
+ Namespace: metav1.NamespaceDefault,
+ Name: "node-0",
+ },
+ {
+ Kind: "Node",
+ Namespace: metav1.NamespaceDefault,
+ Name: "node-1",
+ },
+ },
+ },
+ },
+ {
+			// 0 replicas and 0 nodeRefs
+ TypeMeta: metav1.TypeMeta{
+ Kind: "MachinePool",
+ },
+ ObjectMeta: metav1.ObjectMeta{
+ Namespace: metav1.NamespaceDefault,
+ Name: "machine-pool-name",
+ },
+ Spec: expv1.MachinePoolSpec{
+ Replicas: pointer.Int32(0),
+ },
+ },
+ }
+
+ for _, machinePool := range machinePools {
+ mp := machinePool
+ content, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&mp)
+ if err != nil {
+ g.Fail(err.Error())
+ }
+ unstructuredOwner := unstructured.Unstructured{}
+ unstructuredOwner.SetUnstructuredContent(content)
+ co := ConfigOwner{&unstructuredOwner}
+
+ result := co.HasNodeRefs()
+ g.Expect(result).To(Equal(true))
+ }
+ })
+}
diff --git a/cmd/clusterctl/client/cluster/cert_manager.go b/cmd/clusterctl/client/cluster/cert_manager.go
index 693f6589ad63..8f69c96916e8 100644
--- a/cmd/clusterctl/client/cluster/cert_manager.go
+++ b/cmd/clusterctl/client/cluster/cert_manager.go
@@ -253,6 +253,12 @@ func (cm *certManagerClient) EnsureLatestVersion() error {
return nil
}
+ // Migrate CRs to latest CRD storage version, if necessary.
+ // Note: We have to do this before cert-manager is deleted so conversion webhooks still work.
+ if err := cm.migrateCRDs(); err != nil {
+ return err
+ }
+
// delete the cert-manager version currently installed (because it should be upgraded);
// NOTE: CRDs, and namespace are preserved in order to avoid deletion of user objects;
// web-hooks are preserved to avoid a user attempting to CREATE a cert-manager resource while the upgrade is in progress.
@@ -265,6 +271,30 @@ func (cm *certManagerClient) EnsureLatestVersion() error {
return cm.install()
}
+func (cm *certManagerClient) migrateCRDs() error {
+ config, err := cm.configClient.CertManager().Get()
+ if err != nil {
+ return err
+ }
+
+ // Gets the new cert-manager components from the repository.
+ objs, err := cm.getManifestObjs(config)
+ if err != nil {
+ return err
+ }
+
+ c, err := cm.proxy.NewClient()
+ if err != nil {
+ return err
+ }
+
+ if err := newCRDMigrator(c).Run(ctx, objs); err != nil {
+ return err
+ }
+
+ return nil
+}
+
func (cm *certManagerClient) deleteObjs(objs []unstructured.Unstructured) error {
deleteCertManagerBackoff := newWriteBackoff()
for i := range objs {
diff --git a/cmd/clusterctl/client/cluster/crd_migration.go b/cmd/clusterctl/client/cluster/crd_migration.go
new file mode 100644
index 000000000000..135bd98dbe1d
--- /dev/null
+++ b/cmd/clusterctl/client/cluster/crd_migration.go
@@ -0,0 +1,224 @@
+/*
+Copyright 2022 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cluster
+
+import (
+ "context"
+ "fmt"
+ "strings"
+ "time"
+
+ "github.com/pkg/errors"
+ apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
+ apierrors "k8s.io/apimachinery/pkg/api/errors"
+ "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ "k8s.io/apimachinery/pkg/util/rand"
+ "k8s.io/apimachinery/pkg/util/sets"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+
+ "sigs.k8s.io/cluster-api/cmd/clusterctl/internal/scheme"
+ logf "sigs.k8s.io/cluster-api/cmd/clusterctl/log"
+)
+
+// crdMigrator migrates CRs to the storage version of new CRDs.
+// This is necessary when the new CRD drops a version which
+// was previously used as a storage version.
+type crdMigrator struct {
+ Client client.Client
+}
+
+// newCRDMigrator creates a new CRD migrator.
+func newCRDMigrator(client client.Client) *crdMigrator {
+ return &crdMigrator{
+ Client: client,
+ }
+}
+
+// Run migrates CRs to the storage version of new CRDs.
+// This is necessary when the new CRD drops a version which
+// was previously used as a storage version.
+func (m *crdMigrator) Run(ctx context.Context, objs []unstructured.Unstructured) error {
+ for i := range objs {
+ obj := objs[i]
+
+ if obj.GetKind() == "CustomResourceDefinition" {
+ crd := &apiextensionsv1.CustomResourceDefinition{}
+ if err := scheme.Scheme.Convert(&obj, crd, nil); err != nil {
+ return errors.Wrapf(err, "failed to convert CRD %q", obj.GetName())
+ }
+
+ if _, err := m.run(ctx, crd); err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+// run migrates CRs of a new CRD.
+// This is necessary when the new CRD drops a version which
+// was previously used as a storage version.
+func (m *crdMigrator) run(ctx context.Context, newCRD *apiextensionsv1.CustomResourceDefinition) (bool, error) {
+ log := logf.Log
+
+	// Gets the list of versions supported by the new CRD.
+ newVersions := sets.NewString()
+ for _, version := range newCRD.Spec.Versions {
+ newVersions.Insert(version.Name)
+ }
+
+ // Get the current CRD.
+ currentCRD := &apiextensionsv1.CustomResourceDefinition{}
+ if err := retryWithExponentialBackoff(newReadBackoff(), func() error {
+ return m.Client.Get(ctx, client.ObjectKeyFromObject(newCRD), currentCRD)
+ }); err != nil {
+ // Return if the CRD doesn't exist yet. We only have to migrate if the CRD exists already.
+ if apierrors.IsNotFound(err) {
+ return false, nil
+ }
+ return false, err
+ }
+
+ // Get the storage version of the current CRD.
+ currentStorageVersion, err := storageVersionForCRD(currentCRD)
+ if err != nil {
+ return false, err
+ }
+
+	// Return an error if the current storage version has been dropped in the new CRD.
+ if !newVersions.Has(currentStorageVersion) {
+ return false, errors.Errorf("unable to upgrade CRD %q because the new CRD does not contain the storage version %q of the current CRD, thus not allowing CR migration", newCRD.Name, currentStorageVersion)
+ }
+
+ currentStatusStoredVersions := sets.NewString(currentCRD.Status.StoredVersions...)
+
+ // If the new CRD still contains all current stored versions, nothing to do
+ // as no previous storage version will be dropped.
+ if newVersions.HasAll(currentStatusStoredVersions.List()...) {
+ log.V(2).Info("CRD migration check passed", "name", newCRD.Name)
+ return false, nil
+ }
+
+ // Otherwise a version that has been used as storage version will be dropped, so it is necessary to migrate all the
+ // objects and drop the storage version from the current CRD status before installing the new CRD.
+ // Ref https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definition-versioning/#writing-reading-and-updating-versioned-customresourcedefinition-objects
+ // Note: We are simply migrating all CR objects independent of the version in which they are actually stored in etcd.
+ // This way we can make sure that all CR objects are now stored in the current storage version.
+ // Alternatively, we would have to figure out which objects are stored in which version but this information is not
+ // exposed by the apiserver.
+ storedVersionsToDelete := currentStatusStoredVersions.Difference(newVersions)
+ storedVersionsToPreserve := currentStatusStoredVersions.Intersection(newVersions)
+ log.Info("CR migration required", "kind", newCRD.Spec.Names.Kind, "storedVersionsToDelete", strings.Join(storedVersionsToDelete.List(), ","), "storedVersionsToPreserve", strings.Join(storedVersionsToPreserve.List(), ","))
+
+ if err := m.migrateResourcesForCRD(ctx, currentCRD, currentStorageVersion); err != nil {
+ return false, err
+ }
+
+ if err := m.patchCRDStoredVersions(ctx, currentCRD, currentStorageVersion); err != nil {
+ return false, err
+ }
+
+ return true, nil
+}
+
+func (m *crdMigrator) migrateResourcesForCRD(ctx context.Context, crd *apiextensionsv1.CustomResourceDefinition, currentStorageVersion string) error {
+ log := logf.Log
+ log.Info("Migrating CRs, this operation may take a while...", "kind", crd.Spec.Names.Kind)
+
+ list := &unstructured.UnstructuredList{}
+ list.SetGroupVersionKind(schema.GroupVersionKind{
+ Group: crd.Spec.Group,
+ Version: currentStorageVersion,
+ Kind: crd.Spec.Names.ListKind,
+ })
+
+ var i int
+ for {
+ if err := retryWithExponentialBackoff(newReadBackoff(), func() error {
+ return m.Client.List(ctx, list, client.Continue(list.GetContinue()))
+ }); err != nil {
+ return errors.Wrapf(err, "failed to list %q", list.GetKind())
+ }
+
+		for j := range list.Items {
+			obj := list.Items[j]
+
+ log.V(5).Info("Migrating", logf.UnstructuredToValues(obj)...)
+ if err := retryWithExponentialBackoff(newWriteBackoff(), func() error {
+ return handleMigrateErr(m.Client.Update(ctx, &obj))
+ }); err != nil {
+ return errors.Wrapf(err, "failed to migrate %s/%s", obj.GetNamespace(), obj.GetName())
+ }
+
+ // Add some random delays to avoid pressure on the API server.
+ i++
+ if i%10 == 0 {
+ log.V(2).Info(fmt.Sprintf("%d objects migrated", i))
+ time.Sleep(time.Duration(rand.IntnRange(50*int(time.Millisecond), 250*int(time.Millisecond))))
+ }
+ }
+
+ if list.GetContinue() == "" {
+ break
+ }
+ }
+
+ log.V(2).Info(fmt.Sprintf("CR migration completed: migrated %d objects", i), "kind", crd.Spec.Names.Kind)
+ return nil
+}
+
+func (m *crdMigrator) patchCRDStoredVersions(ctx context.Context, crd *apiextensionsv1.CustomResourceDefinition, currentStorageVersion string) error {
+ crd.Status.StoredVersions = []string{currentStorageVersion}
+ if err := retryWithExponentialBackoff(newWriteBackoff(), func() error {
+ return m.Client.Status().Update(ctx, crd)
+ }); err != nil {
+ return errors.Wrapf(err, "failed to update status.storedVersions for CRD %q", crd.Name)
+ }
+ return nil
+}
+
+// handleMigrateErr will absorb certain types of errors that we know can be skipped/passed on
+// during a migration of a particular object.
+func handleMigrateErr(err error) error {
+ if err == nil {
+ return nil
+ }
+
+ // If the resource no longer exists, don't return the error as the object no longer
+ // needs updating to the new API version.
+ if apierrors.IsNotFound(err) {
+ return nil
+ }
+
+ // If there was a conflict, another client must have written the object already which
+ // means we don't need to force an update.
+ if apierrors.IsConflict(err) {
+ return nil
+ }
+ return err
+}
+
+// storageVersionForCRD discovers the storage version for a given CRD.
+func storageVersionForCRD(crd *apiextensionsv1.CustomResourceDefinition) (string, error) {
+ for _, v := range crd.Spec.Versions {
+ if v.Storage {
+ return v.Name, nil
+ }
+ }
+ return "", errors.Errorf("could not find storage version for CRD %q", crd.Name)
+}
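A short worked example of the stored-version bookkeeping performed by run(), matching the "Migrate" test case in the file below; this is a sketch of the set arithmetic only, not additional code in the migrator:

    currentStored := sets.NewString("v1beta1", "v1alpha1") // currentCRD.Status.StoredVersions
    newVersions := sets.NewString("v1", "v1beta1")          // versions served by the new CRD
    storedVersionsToDelete := currentStored.Difference(newVersions)     // {v1alpha1}
    storedVersionsToPreserve := currentStored.Intersection(newVersions) // {v1beta1}
    // Every CR is rewritten via the current storage version (v1beta1), so nothing
    // remains stored as v1alpha1, and status.storedVersions is then patched down
    // to {v1beta1}. Only after that can the new CRD, which drops v1alpha1, be applied.
    _, _ = storedVersionsToDelete, storedVersionsToPreserve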
diff --git a/cmd/clusterctl/client/cluster/crd_migration_test.go b/cmd/clusterctl/client/cluster/crd_migration_test.go
new file mode 100644
index 000000000000..0813751443a5
--- /dev/null
+++ b/cmd/clusterctl/client/cluster/crd_migration_test.go
@@ -0,0 +1,254 @@
+/*
+Copyright 2022 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cluster
+
+import (
+ "fmt"
+ "testing"
+
+ . "github.com/onsi/gomega"
+ "golang.org/x/net/context"
+ apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+
+ "sigs.k8s.io/cluster-api/cmd/clusterctl/internal/test"
+)
+
+func Test_CRDMigrator(t *testing.T) {
+ tests := []struct {
+ name string
+ CRs []unstructured.Unstructured
+ currentCRD *apiextensionsv1.CustomResourceDefinition
+ newCRD *apiextensionsv1.CustomResourceDefinition
+ wantIsMigrated bool
+ wantStoredVersions []string
+ wantErr bool
+ }{
+ {
+			name:       "No-op if current CRD does not exist",
+ currentCRD: &apiextensionsv1.CustomResourceDefinition{ObjectMeta: metav1.ObjectMeta{Name: "something else"}}, // There is currently no "foo" CRD
+ newCRD: &apiextensionsv1.CustomResourceDefinition{ObjectMeta: metav1.ObjectMeta{Name: "foo"}},
+ wantIsMigrated: false,
+ },
+ {
+ name: "Error if current CRD does not have a storage version",
+ currentCRD: &apiextensionsv1.CustomResourceDefinition{
+ ObjectMeta: metav1.ObjectMeta{Name: "foo"},
+ Spec: apiextensionsv1.CustomResourceDefinitionSpec{
+ Versions: []apiextensionsv1.CustomResourceDefinitionVersion{
+ {Name: "v1alpha1"}, // No storage version as storage is not set.
+ },
+ },
+ Status: apiextensionsv1.CustomResourceDefinitionStatus{StoredVersions: []string{"v1alpha1"}},
+ },
+ newCRD: &apiextensionsv1.CustomResourceDefinition{
+ ObjectMeta: metav1.ObjectMeta{Name: "foo"},
+ Spec: apiextensionsv1.CustomResourceDefinitionSpec{
+ Versions: []apiextensionsv1.CustomResourceDefinitionVersion{
+ {Name: "v1alpha1"},
+ },
+ },
+ },
+ wantErr: true,
+ wantIsMigrated: false,
+ },
+ {
+ name: "No-op if new CRD supports same versions",
+ currentCRD: &apiextensionsv1.CustomResourceDefinition{
+ ObjectMeta: metav1.ObjectMeta{Name: "foo"},
+ Spec: apiextensionsv1.CustomResourceDefinitionSpec{
+ Versions: []apiextensionsv1.CustomResourceDefinitionVersion{
+ {Name: "v1alpha1", Storage: true},
+ },
+ },
+ Status: apiextensionsv1.CustomResourceDefinitionStatus{StoredVersions: []string{"v1alpha1"}},
+ },
+ newCRD: &apiextensionsv1.CustomResourceDefinition{
+ ObjectMeta: metav1.ObjectMeta{Name: "foo"},
+ Spec: apiextensionsv1.CustomResourceDefinitionSpec{
+ Versions: []apiextensionsv1.CustomResourceDefinitionVersion{
+ {Name: "v1alpha1", Storage: true},
+ },
+ },
+ },
+ wantIsMigrated: false,
+ },
+ {
+			name: "No-op if new CRD adds a new version",
+ currentCRD: &apiextensionsv1.CustomResourceDefinition{
+ ObjectMeta: metav1.ObjectMeta{Name: "foo"},
+ Spec: apiextensionsv1.CustomResourceDefinitionSpec{
+ Versions: []apiextensionsv1.CustomResourceDefinitionVersion{
+ {Name: "v1alpha1", Storage: true},
+ },
+ },
+ Status: apiextensionsv1.CustomResourceDefinitionStatus{StoredVersions: []string{"v1alpha1"}},
+ },
+ newCRD: &apiextensionsv1.CustomResourceDefinition{
+ ObjectMeta: metav1.ObjectMeta{Name: "foo"},
+ Spec: apiextensionsv1.CustomResourceDefinitionSpec{
+ Versions: []apiextensionsv1.CustomResourceDefinitionVersion{
+ {Name: "v1beta1", Storage: true}, // v1beta1 is being added
+ {Name: "v1alpha1"}, // v1alpha1 still exists
+ },
+ },
+ },
+ wantIsMigrated: false,
+ },
+ {
+ name: "Fails if new CRD drops current storage version",
+ currentCRD: &apiextensionsv1.CustomResourceDefinition{
+ ObjectMeta: metav1.ObjectMeta{Name: "foo"},
+ Spec: apiextensionsv1.CustomResourceDefinitionSpec{
+ Versions: []apiextensionsv1.CustomResourceDefinitionVersion{
+ {Name: "v1alpha1", Storage: true},
+ },
+ },
+ Status: apiextensionsv1.CustomResourceDefinitionStatus{StoredVersions: []string{"v1alpha1"}},
+ },
+ newCRD: &apiextensionsv1.CustomResourceDefinition{
+ ObjectMeta: metav1.ObjectMeta{Name: "foo"},
+ Spec: apiextensionsv1.CustomResourceDefinitionSpec{
+ Versions: []apiextensionsv1.CustomResourceDefinitionVersion{
+ {Name: "v1", Storage: true}, // CRD is jumping to v1, but dropping current storage version without allowing migration.
+ },
+ },
+ },
+ wantErr: true,
+ },
+ {
+ name: "Migrate",
+ CRs: []unstructured.Unstructured{
+ {
+ Object: map[string]interface{}{
+ "apiVersion": "foo/v1beta1",
+ "kind": "Foo",
+ "metadata": map[string]interface{}{
+ "name": "cr1",
+ "namespace": metav1.NamespaceDefault,
+ },
+ },
+ },
+ {
+ Object: map[string]interface{}{
+ "apiVersion": "foo/v1beta1",
+ "kind": "Foo",
+ "metadata": map[string]interface{}{
+ "name": "cr2",
+ "namespace": metav1.NamespaceDefault,
+ },
+ },
+ },
+ {
+ Object: map[string]interface{}{
+ "apiVersion": "foo/v1beta1",
+ "kind": "Foo",
+ "metadata": map[string]interface{}{
+ "name": "cr3",
+ "namespace": metav1.NamespaceDefault,
+ },
+ },
+ },
+ },
+ currentCRD: &apiextensionsv1.CustomResourceDefinition{
+ ObjectMeta: metav1.ObjectMeta{Name: "foo"},
+ Spec: apiextensionsv1.CustomResourceDefinitionSpec{
+ Group: "foo",
+ Names: apiextensionsv1.CustomResourceDefinitionNames{Kind: "Foo", ListKind: "FooList"},
+ Versions: []apiextensionsv1.CustomResourceDefinitionVersion{
+ {Name: "v1beta1", Storage: true},
+ {Name: "v1alpha1"},
+ },
+ },
+ Status: apiextensionsv1.CustomResourceDefinitionStatus{StoredVersions: []string{"v1beta1", "v1alpha1"}},
+ },
+ newCRD: &apiextensionsv1.CustomResourceDefinition{
+ ObjectMeta: metav1.ObjectMeta{Name: "foo"},
+ Spec: apiextensionsv1.CustomResourceDefinitionSpec{
+ Group: "foo",
+ Names: apiextensionsv1.CustomResourceDefinitionNames{Kind: "Foo", ListKind: "FooList"},
+ Versions: []apiextensionsv1.CustomResourceDefinitionVersion{
+ {Name: "v1", Storage: true}, // v1 is being added
+ {Name: "v1beta1"}, // v1beta1 still there (required for migration)
+ // v1alpha1 is being dropped
+ },
+ },
+ },
+ wantStoredVersions: []string{"v1beta1"}, // v1alpha1 should be dropped from current CRD's stored versions
+ wantIsMigrated: true,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ g := NewWithT(t)
+
+ objs := []client.Object{tt.currentCRD}
+ for i := range tt.CRs {
+ objs = append(objs, &tt.CRs[i])
+ }
+
+ c, err := test.NewFakeProxy().WithObjs(objs...).NewClient()
+ g.Expect(err).ToNot(HaveOccurred())
+ countingClient := newUpgradeCountingClient(c)
+
+ m := crdMigrator{
+ Client: countingClient,
+ }
+
+ isMigrated, err := m.run(ctx, tt.newCRD)
+ if tt.wantErr {
+ g.Expect(err).To(HaveOccurred())
+ } else {
+ g.Expect(err).ToNot(HaveOccurred())
+ }
+ g.Expect(isMigrated).To(Equal(tt.wantIsMigrated))
+
+ if isMigrated {
+ storageVersion, err := storageVersionForCRD(tt.currentCRD)
+ g.Expect(err).ToNot(HaveOccurred())
+
+				// Check all the objects have been migrated.
+ g.Expect(countingClient.count).To(HaveKeyWithValue(fmt.Sprintf("%s/%s, Kind=%s", tt.currentCRD.Spec.Group, storageVersion, tt.currentCRD.Spec.Names.Kind), len(tt.CRs)))
+
+				// Check storage versions have been cleaned up.
+ currentCRD := &apiextensionsv1.CustomResourceDefinition{}
+ err = c.Get(ctx, client.ObjectKeyFromObject(tt.newCRD), currentCRD)
+ g.Expect(err).ToNot(HaveOccurred())
+ g.Expect(currentCRD.Status.StoredVersions).To(Equal(tt.wantStoredVersions))
+ }
+ })
+ }
+}
+
+type UpgradeCountingClient struct {
+ count map[string]int
+ client.Client
+}
+
+func newUpgradeCountingClient(inner client.Client) UpgradeCountingClient {
+ return UpgradeCountingClient{
+ count: map[string]int{},
+ Client: inner,
+ }
+}
+
+func (u UpgradeCountingClient) Update(ctx context.Context, obj client.Object, opts ...client.UpdateOption) error {
+ u.count[obj.GetObjectKind().GroupVersionKind().String()]++
+ return u.Client.Update(ctx, obj, opts...)
+}
diff --git a/cmd/clusterctl/client/cluster/topology.go b/cmd/clusterctl/client/cluster/topology.go
index c5ea2722757f..b8df88d722bd 100644
--- a/cmd/clusterctl/client/cluster/topology.go
+++ b/cmd/clusterctl/client/cluster/topology.go
@@ -222,6 +222,12 @@ func (t *topologyClient) Plan(in *TopologyPlanInput) (*TopologyPlanOutput, error
// - no more than 1 cluster in the input.
// - no more than 1 clusterclass in the input.
func (t *topologyClient) validateInput(in *TopologyPlanInput) error {
+ // Check if objects of the same Group.Kind are using the same version.
+	// Note: This is because the dryrun client does not guarantee any conversions.
+ if !hasUniqueVersionPerGroupKind(in.Objs) {
+ return fmt.Errorf("objects of the same Group.Kind should use the same apiVersion")
+ }
+
// Check all the objects in the input belong to the same namespace.
// Note: It is okay if all the objects in the input do not have any namespace.
// In such case, the list of unique namespaces will be [""].
@@ -501,6 +507,7 @@ func (t *topologyClient) generateCRDs(objs []*unstructured.Unstructured) []*apie
crds := []*apiextensionsv1.CustomResourceDefinition{}
crdMap := map[string]bool{}
var gvk schema.GroupVersionKind
+
for _, obj := range objs {
gvk = obj.GroupVersionKind()
if strings.HasSuffix(gvk.Group, ".cluster.x-k8s.io") && !crdMap[gvk.String()] {
@@ -512,7 +519,8 @@ func (t *topologyClient) generateCRDs(objs []*unstructured.Unstructured) []*apie
ObjectMeta: metav1.ObjectMeta{
Name: fmt.Sprintf("%s.%s", flect.Pluralize(strings.ToLower(gvk.Kind)), gvk.Group),
Labels: map[string]string{
- clusterv1.GroupVersion.String(): clusterv1.GroupVersion.Version,
+ // Here we assume that all the versions are compatible with the Cluster API contract version.
+ clusterv1.GroupVersion.String(): gvk.Version,
},
},
}
@@ -520,6 +528,7 @@ func (t *topologyClient) generateCRDs(objs []*unstructured.Unstructured) []*apie
crdMap[gvk.String()] = true
}
}
+
return crds
}
@@ -698,3 +707,16 @@ func uniqueNamespaces(objs []*unstructured.Unstructured) []string {
}
return ns.List()
}
+
+func hasUniqueVersionPerGroupKind(objs []*unstructured.Unstructured) bool {
+ versionMap := map[string]string{}
+ for _, obj := range objs {
+ gvk := obj.GroupVersionKind()
+ gk := gvk.GroupKind().String()
+ if ver, ok := versionMap[gk]; ok && ver != gvk.Version {
+ return false
+ }
+ versionMap[gk] = gvk.Version
+ }
+ return true
+}
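As an illustration of the new validation (a sketch with made-up objects; DockerCluster is just an example Group/Kind), mixing apiVersions for the same Group/Kind is rejected:

    objs := []*unstructured.Unstructured{
        {Object: map[string]interface{}{"apiVersion": "infrastructure.cluster.x-k8s.io/v1alpha4", "kind": "DockerCluster"}},
        {Object: map[string]interface{}{"apiVersion": "infrastructure.cluster.x-k8s.io/v1beta1", "kind": "DockerCluster"}},
    }
    fmt.Println(hasUniqueVersionPerGroupKind(objs)) // false: one Group/Kind, two apiVersions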
diff --git a/cmd/clusterctl/client/cluster/upgrader.go b/cmd/clusterctl/client/cluster/upgrader.go
index 7e7100328745..e83d7a282570 100644
--- a/cmd/clusterctl/client/cluster/upgrader.go
+++ b/cmd/clusterctl/client/cluster/upgrader.go
@@ -360,6 +360,31 @@ func (u *providerUpgrader) doUpgrade(upgradePlan *UpgradePlan) error {
return providers[a].GetProviderType().Order() < providers[b].GetProviderType().Order()
})
+ // Migrate CRs to latest CRD storage version, if necessary.
+ // Note: We have to do this before the providers are scaled down or deleted
+ // so conversion webhooks still work.
+ for _, upgradeItem := range providers {
+		// If there is no next version specified, skip it (we are already up-to-date).
+ if upgradeItem.NextVersion == "" {
+ continue
+ }
+
+ // Gets the provider components for the target version.
+ components, err := u.getUpgradeComponents(upgradeItem)
+ if err != nil {
+ return err
+ }
+
+ c, err := u.proxy.NewClient()
+ if err != nil {
+ return err
+ }
+
+ if err := newCRDMigrator(c).Run(ctx, components.Objs()); err != nil {
+ return err
+ }
+ }
+
// Scale down all providers.
// This is done to ensure all Pods of all "old" provider Deployments have been deleted.
// Otherwise it can happen that a provider Pod survives the upgrade because we create
diff --git a/cmd/clusterctl/client/config/cert_manager_client.go b/cmd/clusterctl/client/config/cert_manager_client.go
index 2271ca2ee777..65ecd0e9d001 100644
--- a/cmd/clusterctl/client/config/cert_manager_client.go
+++ b/cmd/clusterctl/client/config/cert_manager_client.go
@@ -27,7 +27,7 @@ const (
CertManagerConfigKey = "cert-manager"
// CertManagerDefaultVersion defines the default cert-manager version to be used by clusterctl.
- CertManagerDefaultVersion = "v1.5.3"
+ CertManagerDefaultVersion = "v1.7.2"
// CertManagerDefaultURL defines the default cert-manager repository url to be used by clusterctl.
// NOTE: At runtime /latest will be replaced with the CertManagerDefaultVersion or with the
diff --git a/cmd/clusterctl/client/config/providers_client.go b/cmd/clusterctl/client/config/providers_client.go
index 5d1b5404e496..f334c88600cd 100644
--- a/cmd/clusterctl/client/config/providers_client.go
+++ b/cmd/clusterctl/client/config/providers_client.go
@@ -35,21 +35,26 @@ const (
// Infra providers.
const (
- AWSProviderName = "aws"
- AzureProviderName = "azure"
- BYOHProviderName = "byoh"
- DockerProviderName = "docker"
- DOProviderName = "digitalocean"
- GCPProviderName = "gcp"
- HetznerProviderName = "hetzner"
- IBMCloudProviderName = "ibmcloud"
- Metal3ProviderName = "metal3"
- NestedProviderName = "nested"
- OpenStackProviderName = "openstack"
- PacketProviderName = "packet"
- SideroProviderName = "sidero"
- VSphereProviderName = "vsphere"
- MAASProviderName = "maas"
+ AWSProviderName = "aws"
+ AzureProviderName = "azure"
+ BYOHProviderName = "byoh"
+ CloudStackProviderName = "cloudstack"
+ DockerProviderName = "docker"
+ DOProviderName = "digitalocean"
+ GCPProviderName = "gcp"
+ HetznerProviderName = "hetzner"
+ IBMCloudProviderName = "ibmcloud"
+ Metal3ProviderName = "metal3"
+ NestedProviderName = "nested"
+ NutanixProviderName = "nutanix"
+ OCIProviderName = "oci"
+ OpenStackProviderName = "openstack"
+ PacketProviderName = "packet"
+ SideroProviderName = "sidero"
+ VSphereProviderName = "vsphere"
+ MAASProviderName = "maas"
+ KubevirtProviderName = "kubevirt"
+ VclusterProviderName = "vcluster"
)
// Bootstrap providers.
@@ -130,6 +135,11 @@ func (p *providersClient) defaults() []Provider {
url: "https://github.com/kubernetes-sigs/cluster-api/releases/latest/infrastructure-components-development.yaml",
providerType: clusterctlv1.InfrastructureProviderType,
},
+ &provider{
+ name: CloudStackProviderName,
+ url: "https://github.com/kubernetes-sigs/cluster-api-provider-cloudstack/releases/latest/infrastructure-components.yaml",
+ providerType: clusterctlv1.InfrastructureProviderType,
+ },
&provider{
name: DOProviderName,
url: "https://github.com/kubernetes-sigs/cluster-api-provider-digitalocean/releases/latest/infrastructure-components.yaml",
@@ -155,6 +165,11 @@ func (p *providersClient) defaults() []Provider {
url: "https://github.com/kubernetes-sigs/cluster-api-provider-nested/releases/latest/infrastructure-components.yaml",
providerType: clusterctlv1.InfrastructureProviderType,
},
+ &provider{
+ name: OCIProviderName,
+ url: "https://github.com/oracle/cluster-api-provider-oci/releases/latest/infrastructure-components.yaml",
+ providerType: clusterctlv1.InfrastructureProviderType,
+ },
&provider{
name: OpenStackProviderName,
url: "https://github.com/kubernetes-sigs/cluster-api-provider-openstack/releases/latest/infrastructure-components.yaml",
@@ -162,7 +177,7 @@ func (p *providersClient) defaults() []Provider {
},
&provider{
name: SideroProviderName,
- url: "https://github.com/talos-systems/sidero/releases/latest/infrastructure-components.yaml",
+ url: "https://github.com/siderolabs/sidero/releases/latest/infrastructure-components.yaml",
providerType: clusterctlv1.InfrastructureProviderType,
},
&provider{
@@ -190,6 +205,21 @@ func (p *providersClient) defaults() []Provider {
url: "https://github.com/kubernetes-sigs/cluster-api-provider-ibmcloud/releases/latest/infrastructure-components.yaml",
providerType: clusterctlv1.InfrastructureProviderType,
},
+ &provider{
+ name: NutanixProviderName,
+ url: "https://github.com/nutanix-cloud-native/cluster-api-provider-nutanix/releases/latest/infrastructure-components.yaml",
+ providerType: clusterctlv1.InfrastructureProviderType,
+ },
+ &provider{
+ name: KubevirtProviderName,
+ url: "https://github.com/kubernetes-sigs/cluster-api-provider-kubevirt/releases/latest/infrastructure-components.yaml",
+ providerType: clusterctlv1.InfrastructureProviderType,
+ },
+ &provider{
+ name: VclusterProviderName,
+ url: "https://github.com/loft-sh/cluster-api-provider-vcluster/releases/latest/infrastructure-components.yaml",
+ providerType: clusterctlv1.InfrastructureProviderType,
+ },
// Bootstrap providers
&provider{
@@ -199,7 +229,7 @@ func (p *providersClient) defaults() []Provider {
},
&provider{
name: TalosBootstrapProviderName,
- url: "https://github.com/talos-systems/cluster-api-bootstrap-provider-talos/releases/latest/bootstrap-components.yaml",
+ url: "https://github.com/siderolabs/cluster-api-bootstrap-provider-talos/releases/latest/bootstrap-components.yaml",
providerType: clusterctlv1.BootstrapProviderType,
},
&provider{
@@ -215,7 +245,7 @@ func (p *providersClient) defaults() []Provider {
},
&provider{
name: TalosControlPlaneProviderName,
- url: "https://github.com/talos-systems/cluster-api-control-plane-provider-talos/releases/latest/control-plane-components.yaml",
+ url: "https://github.com/siderolabs/cluster-api-control-plane-provider-talos/releases/latest/control-plane-components.yaml",
providerType: clusterctlv1.ControlPlaneProviderType,
},
&provider{
diff --git a/cmd/clusterctl/client/config_test.go b/cmd/clusterctl/client/config_test.go
index 83d03272ff5f..1734303cf30f 100644
--- a/cmd/clusterctl/client/config_test.go
+++ b/cmd/clusterctl/client/config_test.go
@@ -66,17 +66,22 @@ func Test_clusterctlClient_GetProvidersConfig(t *testing.T) {
config.AWSProviderName,
config.AzureProviderName,
config.BYOHProviderName,
+ config.CloudStackProviderName,
config.DOProviderName,
config.DockerProviderName,
config.GCPProviderName,
config.HetznerProviderName,
config.IBMCloudProviderName,
+ config.KubevirtProviderName,
config.MAASProviderName,
config.Metal3ProviderName,
config.NestedProviderName,
+ config.NutanixProviderName,
+ config.OCIProviderName,
config.OpenStackProviderName,
config.PacketProviderName,
config.SideroProviderName,
+ config.VclusterProviderName,
config.VSphereProviderName,
},
wantErr: false,
@@ -100,17 +105,22 @@ func Test_clusterctlClient_GetProvidersConfig(t *testing.T) {
config.AWSProviderName,
config.AzureProviderName,
config.BYOHProviderName,
+ config.CloudStackProviderName,
config.DOProviderName,
config.DockerProviderName,
config.GCPProviderName,
config.HetznerProviderName,
config.IBMCloudProviderName,
+ config.KubevirtProviderName,
config.MAASProviderName,
config.Metal3ProviderName,
config.NestedProviderName,
+ config.NutanixProviderName,
+ config.OCIProviderName,
config.OpenStackProviderName,
config.PacketProviderName,
config.SideroProviderName,
+ config.VclusterProviderName,
config.VSphereProviderName,
},
wantErr: false,
diff --git a/cmd/clusterctl/client/repository/repository_github.go b/cmd/clusterctl/client/repository/repository_github.go
index b710cfb91da9..6d1f1b8ea4b1 100644
--- a/cmd/clusterctl/client/repository/repository_github.go
+++ b/cmd/clusterctl/client/repository/repository_github.go
@@ -24,11 +24,13 @@ import (
"net/url"
"path/filepath"
"strings"
+ "time"
"github.com/google/go-github/v33/github"
"github.com/pkg/errors"
"golang.org/x/oauth2"
"k8s.io/apimachinery/pkg/util/version"
+ "k8s.io/apimachinery/pkg/util/wait"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
"sigs.k8s.io/cluster-api/cmd/clusterctl/client/config"
@@ -44,9 +46,11 @@ const (
var (
// Caches used to limit the number of GitHub API calls.
- cacheVersions = map[string][]string{}
- cacheReleases = map[string]*github.RepositoryRelease{}
- cacheFiles = map[string][]byte{}
+ cacheVersions = map[string][]string{}
+ cacheReleases = map[string]*github.RepositoryRelease{}
+ cacheFiles = map[string][]byte{}
+ retryableOperationInterval = 10 * time.Second
+ retryableOperationTimeout = 1 * time.Minute
)
// gitHubRepository provides support for providers hosted on GitHub.
@@ -216,9 +220,24 @@ func (g *gitHubRepository) getVersions() ([]string, error) {
// get all the releases
	// NB. currently the GitHub API does not support result ordering, so it is not possible to limit results
- releases, _, err := client.Repositories.ListReleases(context.TODO(), g.owner, g.repository, nil)
- if err != nil {
- return nil, g.handleGithubErr(err, "failed to get the list of releases")
+ var releases []*github.RepositoryRelease
+ var retryError error
+ _ = wait.PollImmediate(retryableOperationInterval, retryableOperationTimeout, func() (bool, error) {
+ var listReleasesErr error
+ releases, _, listReleasesErr = client.Repositories.ListReleases(context.TODO(), g.owner, g.repository, nil)
+ if listReleasesErr != nil {
+ retryError = g.handleGithubErr(listReleasesErr, "failed to get the list of releases")
+ // return immediately if we are rate limited
+ if _, ok := listReleasesErr.(*github.RateLimitError); ok {
+ return false, retryError
+ }
+ return false, nil
+ }
+ retryError = nil
+ return true, nil
+ })
+ if retryError != nil {
+ return nil, retryError
}
versions := []string{}
for _, r := range releases {
@@ -247,13 +266,24 @@ func (g *gitHubRepository) getReleaseByTag(tag string) (*github.RepositoryReleas
client := g.getClient()
- release, _, err := client.Repositories.GetReleaseByTag(context.TODO(), g.owner, g.repository, tag)
- if err != nil {
- return nil, g.handleGithubErr(err, "failed to read release %q", tag)
- }
-
- if release == nil {
- return nil, errors.Errorf("failed to get release %q", tag)
+ var release *github.RepositoryRelease
+ var retryError error
+ _ = wait.PollImmediate(retryableOperationInterval, retryableOperationTimeout, func() (bool, error) {
+ var getReleasesErr error
+ release, _, getReleasesErr = client.Repositories.GetReleaseByTag(context.TODO(), g.owner, g.repository, tag)
+ if getReleasesErr != nil {
+ retryError = g.handleGithubErr(getReleasesErr, "failed to read release %q", tag)
+ // return immediately if we are rate limited
+ if _, ok := getReleasesErr.(*github.RateLimitError); ok {
+ return false, retryError
+ }
+ return false, nil
+ }
+ retryError = nil
+ return true, nil
+ })
+ if retryError != nil {
+ return nil, retryError
}
cacheReleases[cacheID] = release
@@ -262,6 +292,8 @@ func (g *gitHubRepository) getReleaseByTag(tag string) (*github.RepositoryReleas
// downloadFilesFromRelease download a file from release.
func (g *gitHubRepository) downloadFilesFromRelease(release *github.RepositoryRelease, fileName string) ([]byte, error) {
+ ctx := context.TODO()
+
cacheID := fmt.Sprintf("%s/%s:%s:%s", g.owner, g.repository, *release.TagName, fileName)
if content, ok := cacheFiles[cacheID]; ok {
return content, nil
@@ -282,18 +314,43 @@ func (g *gitHubRepository) downloadFilesFromRelease(release *github.RepositoryRe
return nil, errors.Errorf("failed to get file %q from %q release", fileName, *release.TagName)
}
- reader, redirect, err := client.Repositories.DownloadReleaseAsset(context.TODO(), g.owner, g.repository, *assetID, http.DefaultClient)
- if err != nil {
- return nil, g.handleGithubErr(err, "failed to download file %q from %q release", *release.TagName, fileName)
- }
- if redirect != "" {
- response, err := http.Get(redirect) //nolint:bodyclose,gosec // (NB: The reader is actually closed in a defer)
- if err != nil {
- return nil, errors.Wrapf(err, "failed to download file %q from %q release via redirect location %q", *release.TagName, fileName, redirect)
+ var reader io.ReadCloser
+ var retryError error
+ _ = wait.PollImmediate(retryableOperationInterval, retryableOperationTimeout, func() (bool, error) {
+ var redirect string
+ var downloadReleaseError error
+		reader, redirect, downloadReleaseError = client.Repositories.DownloadReleaseAsset(ctx, g.owner, g.repository, *assetID, http.DefaultClient)
+ if downloadReleaseError != nil {
+ retryError = g.handleGithubErr(downloadReleaseError, "failed to download file %q from %q release", *release.TagName, fileName)
+ // return immediately if we are rate limited
+ if _, ok := downloadReleaseError.(*github.RateLimitError); ok {
+ return false, retryError
+ }
+ return false, nil
+ }
+ if redirect != "" {
+ req, err := http.NewRequestWithContext(ctx, http.MethodGet, redirect, http.NoBody)
+ if err != nil {
+ retryError = errors.Wrapf(err, "failed to download file %q from %q release via redirect location %q: failed to create request", *release.TagName, fileName, redirect)
+ return false, nil
+ }
+
+ response, err := http.DefaultClient.Do(req) //nolint:bodyclose // (NB: The reader is actually closed in a defer)
+ if err != nil {
+ retryError = errors.Wrapf(err, "failed to download file %q from %q release via redirect location %q", *release.TagName, fileName, redirect)
+ return false, nil
+ }
+ reader = response.Body
}
- reader = response.Body
+ retryError = nil
+ return true, nil
+ })
+ if reader != nil {
+ defer reader.Close()
+ }
+ if retryError != nil {
+ return nil, retryError
}
- defer reader.Close()
// Read contents from the reader (redirect or not), and return.
content, err := io.ReadAll(reader)
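The three call sites above (getVersions, getReleaseByTag, downloadFilesFromRelease) share the same retry shape; condensed into one sketch, where doGitHubCall is a hypothetical stand-in for the actual go-github call and the enclosing function is elided:

    var result *github.RepositoryRelease
    var retryError error
    _ = wait.PollImmediate(retryableOperationInterval, retryableOperationTimeout, func() (bool, error) {
        var err error
        result, err = doGitHubCall() // stand-in for e.g. a GetReleaseByTag call
        if err != nil {
            retryError = g.handleGithubErr(err, "GitHub call failed")
            // A rate-limit error is terminal for this poll: retrying within the
            // timeout will not help, so stop immediately and surface the error.
            if _, ok := err.(*github.RateLimitError); ok {
                return false, retryError
            }
            // Any other error is treated as transient; keep polling.
            return false, nil
        }
        retryError = nil
        return true, nil
    })
    if retryError != nil {
        return nil, retryError
    }
    return result, nil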
diff --git a/cmd/clusterctl/client/repository/repository_github_test.go b/cmd/clusterctl/client/repository/repository_github_test.go
index d6809c1daebe..c1ef05b528ea 100644
--- a/cmd/clusterctl/client/repository/repository_github_test.go
+++ b/cmd/clusterctl/client/repository/repository_github_test.go
@@ -20,6 +20,7 @@ import (
"fmt"
"net/http"
"testing"
+ "time"
"github.com/google/go-github/v33/github"
. "github.com/onsi/gomega"
@@ -31,6 +32,8 @@ import (
)
func Test_githubRepository_newGitHubRepository(t *testing.T) {
+ retryableOperationInterval = 200 * time.Millisecond
+ retryableOperationTimeout = 1 * time.Second
type field struct {
providerConfig config.Provider
variableClient config.VariablesClient
@@ -156,6 +159,8 @@ func Test_githubRepository_getComponentsPath(t *testing.T) {
}
func Test_githubRepository_getFile(t *testing.T) {
+ retryableOperationInterval = 200 * time.Millisecond
+ retryableOperationTimeout = 1 * time.Second
client, mux, teardown := test.NewFakeGitHub()
defer teardown()
@@ -228,6 +233,8 @@ func Test_githubRepository_getFile(t *testing.T) {
}
func Test_gitHubRepository_getVersions(t *testing.T) {
+ retryableOperationInterval = 200 * time.Millisecond
+ retryableOperationTimeout = 1 * time.Second
client, mux, teardown := test.NewFakeGitHub()
defer teardown()
@@ -284,6 +291,8 @@ func Test_gitHubRepository_getVersions(t *testing.T) {
}
func Test_gitHubRepository_getLatestContractRelease(t *testing.T) {
+ retryableOperationInterval = 200 * time.Millisecond
+ retryableOperationTimeout = 1 * time.Second
client, mux, teardown := test.NewFakeGitHub()
defer teardown()
@@ -540,6 +549,8 @@ func Test_gitHubRepository_getLatestPatchRelease(t *testing.T) {
}
func Test_gitHubRepository_getReleaseByTag(t *testing.T) {
+ retryableOperationInterval = 200 * time.Millisecond
+ retryableOperationTimeout = 1 * time.Second
client, mux, teardown := test.NewFakeGitHub()
defer teardown()
@@ -605,6 +616,8 @@ func Test_gitHubRepository_getReleaseByTag(t *testing.T) {
}
func Test_gitHubRepository_downloadFilesFromRelease(t *testing.T) {
+ retryableOperationInterval = 200 * time.Millisecond
+ retryableOperationTimeout = 1 * time.Second
client, mux, teardown := test.NewFakeGitHub()
defer teardown()
diff --git a/cmd/clusterctl/cmd/config_repositories_test.go b/cmd/clusterctl/cmd/config_repositories_test.go
index c19c6341cad0..1765c8176706 100644
--- a/cmd/clusterctl/cmd/config_repositories_test.go
+++ b/cmd/clusterctl/cmd/config_repositories_test.go
@@ -99,32 +99,37 @@ providers:
type: "CoreProvider"
`
-var expectedOutputText = `NAME TYPE URL FILE
-cluster-api CoreProvider https://github.com/myorg/myforkofclusterapi/releases/latest/ core_components.yaml
-another-provider BootstrapProvider ./ bootstrap-components.yaml
-aws-eks BootstrapProvider https://github.com/kubernetes-sigs/cluster-api-provider-aws/releases/latest/ eks-bootstrap-components.yaml
-kubeadm BootstrapProvider https://github.com/kubernetes-sigs/cluster-api/releases/latest/ bootstrap-components.yaml
-talos BootstrapProvider https://github.com/talos-systems/cluster-api-bootstrap-provider-talos/releases/latest/ bootstrap-components.yaml
-aws-eks ControlPlaneProvider https://github.com/kubernetes-sigs/cluster-api-provider-aws/releases/latest/ eks-controlplane-components.yaml
-kubeadm ControlPlaneProvider https://github.com/kubernetes-sigs/cluster-api/releases/latest/ control-plane-components.yaml
-nested ControlPlaneProvider https://github.com/kubernetes-sigs/cluster-api-provider-nested/releases/latest/ control-plane-components.yaml
-talos ControlPlaneProvider https://github.com/talos-systems/cluster-api-control-plane-provider-talos/releases/latest/ control-plane-components.yaml
-aws InfrastructureProvider my-aws-infrastructure-components.yaml
-azure InfrastructureProvider https://github.com/kubernetes-sigs/cluster-api-provider-azure/releases/latest/ infrastructure-components.yaml
-byoh InfrastructureProvider https://github.com/vmware-tanzu/cluster-api-provider-bringyourownhost/releases/latest/ infrastructure-components.yaml
-digitalocean InfrastructureProvider https://github.com/kubernetes-sigs/cluster-api-provider-digitalocean/releases/latest/ infrastructure-components.yaml
-docker InfrastructureProvider https://github.com/kubernetes-sigs/cluster-api/releases/latest/ infrastructure-components-development.yaml
-gcp InfrastructureProvider https://github.com/kubernetes-sigs/cluster-api-provider-gcp/releases/latest/ infrastructure-components.yaml
-hetzner InfrastructureProvider https://github.com/syself/cluster-api-provider-hetzner/releases/latest/ infrastructure-components.yaml
-ibmcloud InfrastructureProvider https://github.com/kubernetes-sigs/cluster-api-provider-ibmcloud/releases/latest/ infrastructure-components.yaml
-maas InfrastructureProvider https://github.com/spectrocloud/cluster-api-provider-maas/releases/latest/ infrastructure-components.yaml
-metal3 InfrastructureProvider https://github.com/metal3-io/cluster-api-provider-metal3/releases/latest/ infrastructure-components.yaml
-my-infra-provider InfrastructureProvider /home/.cluster-api/overrides/infrastructure-docker/latest/ infrastructure-components.yaml
-nested InfrastructureProvider https://github.com/kubernetes-sigs/cluster-api-provider-nested/releases/latest/ infrastructure-components.yaml
-openstack InfrastructureProvider https://github.com/kubernetes-sigs/cluster-api-provider-openstack/releases/latest/ infrastructure-components.yaml
-packet InfrastructureProvider https://github.com/kubernetes-sigs/cluster-api-provider-packet/releases/latest/ infrastructure-components.yaml
-sidero InfrastructureProvider https://github.com/talos-systems/sidero/releases/latest/ infrastructure-components.yaml
-vsphere InfrastructureProvider https://github.com/kubernetes-sigs/cluster-api-provider-vsphere/releases/latest/ infrastructure-components.yaml
+var expectedOutputText = `NAME TYPE URL FILE
+cluster-api CoreProvider https://github.com/myorg/myforkofclusterapi/releases/latest/ core_components.yaml
+another-provider BootstrapProvider ./ bootstrap-components.yaml
+aws-eks BootstrapProvider https://github.com/kubernetes-sigs/cluster-api-provider-aws/releases/latest/ eks-bootstrap-components.yaml
+kubeadm BootstrapProvider https://github.com/kubernetes-sigs/cluster-api/releases/latest/ bootstrap-components.yaml
+talos BootstrapProvider https://github.com/siderolabs/cluster-api-bootstrap-provider-talos/releases/latest/ bootstrap-components.yaml
+aws-eks ControlPlaneProvider https://github.com/kubernetes-sigs/cluster-api-provider-aws/releases/latest/ eks-controlplane-components.yaml
+kubeadm ControlPlaneProvider https://github.com/kubernetes-sigs/cluster-api/releases/latest/ control-plane-components.yaml
+nested ControlPlaneProvider https://github.com/kubernetes-sigs/cluster-api-provider-nested/releases/latest/ control-plane-components.yaml
+talos ControlPlaneProvider https://github.com/siderolabs/cluster-api-control-plane-provider-talos/releases/latest/ control-plane-components.yaml
+aws InfrastructureProvider my-aws-infrastructure-components.yaml
+azure InfrastructureProvider https://github.com/kubernetes-sigs/cluster-api-provider-azure/releases/latest/ infrastructure-components.yaml
+byoh InfrastructureProvider https://github.com/vmware-tanzu/cluster-api-provider-bringyourownhost/releases/latest/ infrastructure-components.yaml
+cloudstack InfrastructureProvider https://github.com/kubernetes-sigs/cluster-api-provider-cloudstack/releases/latest/ infrastructure-components.yaml
+digitalocean InfrastructureProvider https://github.com/kubernetes-sigs/cluster-api-provider-digitalocean/releases/latest/ infrastructure-components.yaml
+docker InfrastructureProvider https://github.com/kubernetes-sigs/cluster-api/releases/latest/ infrastructure-components-development.yaml
+gcp InfrastructureProvider https://github.com/kubernetes-sigs/cluster-api-provider-gcp/releases/latest/ infrastructure-components.yaml
+hetzner InfrastructureProvider https://github.com/syself/cluster-api-provider-hetzner/releases/latest/ infrastructure-components.yaml
+ibmcloud InfrastructureProvider https://github.com/kubernetes-sigs/cluster-api-provider-ibmcloud/releases/latest/ infrastructure-components.yaml
+kubevirt InfrastructureProvider https://github.com/kubernetes-sigs/cluster-api-provider-kubevirt/releases/latest/ infrastructure-components.yaml
+maas InfrastructureProvider https://github.com/spectrocloud/cluster-api-provider-maas/releases/latest/ infrastructure-components.yaml
+metal3 InfrastructureProvider https://github.com/metal3-io/cluster-api-provider-metal3/releases/latest/ infrastructure-components.yaml
+my-infra-provider InfrastructureProvider /home/.cluster-api/overrides/infrastructure-docker/latest/ infrastructure-components.yaml
+nested InfrastructureProvider https://github.com/kubernetes-sigs/cluster-api-provider-nested/releases/latest/ infrastructure-components.yaml
+nutanix InfrastructureProvider https://github.com/nutanix-cloud-native/cluster-api-provider-nutanix/releases/latest/ infrastructure-components.yaml
+oci InfrastructureProvider https://github.com/oracle/cluster-api-provider-oci/releases/latest/ infrastructure-components.yaml
+openstack InfrastructureProvider https://github.com/kubernetes-sigs/cluster-api-provider-openstack/releases/latest/ infrastructure-components.yaml
+packet InfrastructureProvider https://github.com/kubernetes-sigs/cluster-api-provider-packet/releases/latest/ infrastructure-components.yaml
+sidero InfrastructureProvider https://github.com/siderolabs/sidero/releases/latest/ infrastructure-components.yaml
+vcluster InfrastructureProvider https://github.com/loft-sh/cluster-api-provider-vcluster/releases/latest/ infrastructure-components.yaml
+vsphere InfrastructureProvider https://github.com/kubernetes-sigs/cluster-api-provider-vsphere/releases/latest/ infrastructure-components.yaml
`
var expectedOutputYaml = `- File: core_components.yaml
@@ -146,7 +151,7 @@ var expectedOutputYaml = `- File: core_components.yaml
- File: bootstrap-components.yaml
Name: talos
ProviderType: BootstrapProvider
- URL: https://github.com/talos-systems/cluster-api-bootstrap-provider-talos/releases/latest/
+ URL: https://github.com/siderolabs/cluster-api-bootstrap-provider-talos/releases/latest/
- File: eks-controlplane-components.yaml
Name: aws-eks
ProviderType: ControlPlaneProvider
@@ -162,7 +167,7 @@ var expectedOutputYaml = `- File: core_components.yaml
- File: control-plane-components.yaml
Name: talos
ProviderType: ControlPlaneProvider
- URL: https://github.com/talos-systems/cluster-api-control-plane-provider-talos/releases/latest/
+ URL: https://github.com/siderolabs/cluster-api-control-plane-provider-talos/releases/latest/
- File: my-aws-infrastructure-components.yaml
Name: aws
ProviderType: InfrastructureProvider
@@ -175,6 +180,10 @@ var expectedOutputYaml = `- File: core_components.yaml
Name: byoh
ProviderType: InfrastructureProvider
URL: https://github.com/vmware-tanzu/cluster-api-provider-bringyourownhost/releases/latest/
+- File: infrastructure-components.yaml
+ Name: cloudstack
+ ProviderType: InfrastructureProvider
+ URL: https://github.com/kubernetes-sigs/cluster-api-provider-cloudstack/releases/latest/
- File: infrastructure-components.yaml
Name: digitalocean
ProviderType: InfrastructureProvider
@@ -195,6 +204,10 @@ var expectedOutputYaml = `- File: core_components.yaml
Name: ibmcloud
ProviderType: InfrastructureProvider
URL: https://github.com/kubernetes-sigs/cluster-api-provider-ibmcloud/releases/latest/
+- File: infrastructure-components.yaml
+ Name: kubevirt
+ ProviderType: InfrastructureProvider
+ URL: https://github.com/kubernetes-sigs/cluster-api-provider-kubevirt/releases/latest/
- File: infrastructure-components.yaml
Name: maas
ProviderType: InfrastructureProvider
@@ -211,6 +224,14 @@ var expectedOutputYaml = `- File: core_components.yaml
Name: nested
ProviderType: InfrastructureProvider
URL: https://github.com/kubernetes-sigs/cluster-api-provider-nested/releases/latest/
+- File: infrastructure-components.yaml
+ Name: nutanix
+ ProviderType: InfrastructureProvider
+ URL: https://github.com/nutanix-cloud-native/cluster-api-provider-nutanix/releases/latest/
+- File: infrastructure-components.yaml
+ Name: oci
+ ProviderType: InfrastructureProvider
+ URL: https://github.com/oracle/cluster-api-provider-oci/releases/latest/
- File: infrastructure-components.yaml
Name: openstack
ProviderType: InfrastructureProvider
@@ -222,7 +243,11 @@ var expectedOutputYaml = `- File: core_components.yaml
- File: infrastructure-components.yaml
Name: sidero
ProviderType: InfrastructureProvider
- URL: https://github.com/talos-systems/sidero/releases/latest/
+ URL: https://github.com/siderolabs/sidero/releases/latest/
+- File: infrastructure-components.yaml
+ Name: vcluster
+ ProviderType: InfrastructureProvider
+ URL: https://github.com/loft-sh/cluster-api-provider-vcluster/releases/latest/
- File: infrastructure-components.yaml
Name: vsphere
ProviderType: InfrastructureProvider
diff --git a/controlplane/kubeadm/internal/workload_cluster_coredns.go b/controlplane/kubeadm/internal/workload_cluster_coredns.go
index 97c062e0df21..d25293b81d7b 100644
--- a/controlplane/kubeadm/internal/workload_cluster_coredns.go
+++ b/controlplane/kubeadm/internal/workload_cluster_coredns.go
@@ -125,6 +125,12 @@ func (w *Workload) UpdateCoreDNS(ctx context.Context, kcp *controlplanev1.Kubead
return err
}
+ // Update the ClusterRole independently of image changes. Kubernetes may be upgraded
+ // to v1.22, which requires updating the ClusterRole even when the image does not change.
+ if err := w.updateCoreDNSClusterRole(ctx, version, info); err != nil {
+ return err
+ }
+
// Return early if the from/to image is the same.
if info.FromImage == info.ToImage {
return nil
@@ -142,9 +148,7 @@ func (w *Workload) UpdateCoreDNS(ctx context.Context, kcp *controlplanev1.Kubead
if err := w.updateCoreDNSCorefile(ctx, info); err != nil {
return err
}
- if err := w.updateCoreDNSClusterRole(ctx, version, info); err != nil {
- return err
- }
+
if err := w.updateCoreDNSDeployment(ctx, info); err != nil {
return errors.Wrap(err, "unable to update coredns deployment")
}
@@ -273,6 +277,19 @@ func (w *Workload) updateCoreDNSClusterRole(ctx context.Context, kubernetesVersi
return nil
}
+ sourceCoreDNSVersion, err := extractImageVersion(info.FromImageTag)
+ if err != nil {
+ return err
+ }
+ // Do nothing for Kubernetes >= v1.23 and sourceCoreDNSVersion >= v1.8.1.
+ // With those versions we know that the ClusterRole has already been updated,
+ // as there must have been a previous upgrade to Kubernetes 1.22
+ // (Kubernetes minor versions cannot be skipped) and to CoreDNS >= v1.8.1.
+ if kubernetesVersion.GTE(semver.Version{Major: 1, Minor: 23, Patch: 0}) &&
+ sourceCoreDNSVersion.GTE(semver.Version{Major: 1, Minor: 8, Patch: 1}) {
+ return nil
+ }
+
key := ctrlclient.ObjectKey{Name: coreDNSClusterRoleName, Namespace: metav1.NamespaceSystem}
return retry.RetryOnConflict(retry.DefaultBackoff, func() error {
currentClusterRole := &rbacv1.ClusterRole{}
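
As an aside (not part of the patch), the skip condition added above can be read in isolation as a pure function over the two versions. The sketch below restates it with `github.com/blang/semver` under a hypothetical helper name, with two worked examples:

```go
package main

import (
	"fmt"

	"github.com/blang/semver"
)

// skipCoreDNSClusterRoleUpdate is a hypothetical helper restating the gate above:
// on Kubernetes >= v1.23 with a source CoreDNS >= v1.8.1, the ClusterRole must already
// have been updated during the (unskippable) v1.22 upgrade, so no patch is needed.
func skipCoreDNSClusterRoleUpdate(kubernetesVersion, sourceCoreDNSVersion semver.Version) bool {
	return kubernetesVersion.GTE(semver.Version{Major: 1, Minor: 23, Patch: 0}) &&
		sourceCoreDNSVersion.GTE(semver.Version{Major: 1, Minor: 8, Patch: 1})
}

func main() {
	fmt.Println(skipCoreDNSClusterRoleUpdate(semver.MustParse("1.23.0"), semver.MustParse("1.8.1"))) // true: skip the update
	fmt.Println(skipCoreDNSClusterRoleUpdate(semver.MustParse("1.22.2"), semver.MustParse("1.8.1"))) // false: still patch the ClusterRole
}
```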
diff --git a/controlplane/kubeadm/internal/workload_cluster_coredns_test.go b/controlplane/kubeadm/internal/workload_cluster_coredns_test.go
index 12e143a0d57e..f6568901047c 100644
--- a/controlplane/kubeadm/internal/workload_cluster_coredns_test.go
+++ b/controlplane/kubeadm/internal/workload_cluster_coredns_test.go
@@ -108,6 +108,16 @@ func TestUpdateCoreDNS(t *testing.T) {
"Corefile": expectedCorefile,
},
}
+ updatedCM := &corev1.ConfigMap{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: coreDNSKey,
+ Namespace: metav1.NamespaceSystem,
+ },
+ Data: map[string]string{
+ "Corefile": "updated-core-file",
+ "Corefile-backup": expectedCorefile,
+ },
+ }
kubeadmCM := &corev1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: kubeadmConfigKey,
@@ -124,15 +134,48 @@ func TestUpdateCoreDNS(t *testing.T) {
`),
},
}
+ kubeadmCM181 := &corev1.ConfigMap{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: kubeadmConfigKey,
+ Namespace: metav1.NamespaceSystem,
+ },
+ Data: map[string]string{
+ "ClusterConfiguration": yaml.Raw(`
+ apiServer:
+ apiVersion: kubeadm.k8s.io/v1beta2
+ dns:
+ type: CoreDNS
+ imageTag: v1.8.1
+ imageRepository: k8s.gcr.io
+ kind: ClusterConfiguration
+ `),
+ },
+ }
+
+ oldCR := &rbacv1.ClusterRole{
+ TypeMeta: metav1.TypeMeta{
+ Kind: "ClusterRole",
+ APIVersion: "rbac.authorization.k8s.io/v1",
+ },
+ ObjectMeta: metav1.ObjectMeta{
+ Name: coreDNSClusterRoleName,
+ },
+ }
+
+ semver1191 := semver.MustParse("1.19.1")
+ semver1221 := semver.MustParse("1.22.1")
+ semver1230 := semver.MustParse("1.23.0")
tests := []struct {
name string
kcp *controlplanev1.KubeadmControlPlane
migrator coreDNSMigrator
+ semver semver.Version
objs []client.Object
expectErr bool
expectUpdates bool
expectImage string
+ expectRules []rbacv1.PolicyRule
}{
{
name: "returns early without error if skip core dns annotation is present",
@@ -150,6 +193,7 @@ func TestUpdateCoreDNS(t *testing.T) {
},
},
},
+ semver: semver1191,
objs: []client.Object{badCM},
expectErr: false,
},
@@ -160,23 +204,27 @@ func TestUpdateCoreDNS(t *testing.T) {
KubeadmConfigSpec: bootstrapv1.KubeadmConfigSpec{},
},
},
+ semver: semver1191,
objs: []client.Object{badCM},
expectErr: false,
},
{
name: "returns early without error if CoreDNS info is not found",
kcp: validKCP,
+ semver: semver1191,
expectErr: false,
},
{
name: "returns error if there was a problem retrieving CoreDNS info",
kcp: validKCP,
+ semver: semver1191,
objs: []client.Object{badCM},
expectErr: true,
},
{
name: "returns early without error if CoreDNS fromImage == ToImage",
kcp: validKCP,
+ semver: semver1191,
objs: []client.Object{depl, cm},
expectErr: false,
},
@@ -198,6 +246,7 @@ func TestUpdateCoreDNS(t *testing.T) {
},
},
},
+ semver: semver1191,
objs: []client.Object{depl, cm},
expectErr: true,
},
@@ -218,6 +267,7 @@ func TestUpdateCoreDNS(t *testing.T) {
},
},
},
+ semver: semver1191,
// no kubeadmConfigMap available so it will trigger an error
objs: []client.Object{depl, cm},
expectErr: true,
@@ -242,6 +292,7 @@ func TestUpdateCoreDNS(t *testing.T) {
migrator: &fakeMigrator{
migrateErr: errors.New("failed to migrate"),
},
+ semver: semver1191,
objs: []client.Object{depl, cm, kubeadmCM},
expectErr: true,
},
@@ -265,6 +316,7 @@ func TestUpdateCoreDNS(t *testing.T) {
migrator: &fakeMigrator{
migratedCorefile: "updated-core-file",
},
+ semver: semver1191,
objs: []client.Object{depl, cm, kubeadmCM},
expectErr: false,
expectUpdates: true,
@@ -290,6 +342,7 @@ func TestUpdateCoreDNS(t *testing.T) {
migrator: &fakeMigrator{
migratedCorefile: "updated-core-file",
},
+ semver: semver1191,
objs: []client.Object{deplWithImage("k8s.gcr.io/some-repo/coredns:1.7.0"), cm, kubeadmCM},
expectErr: false,
expectUpdates: true,
@@ -314,6 +367,7 @@ func TestUpdateCoreDNS(t *testing.T) {
migrator: &fakeMigrator{
migratedCorefile: "updated-core-file",
},
+ semver: semver1191,
objs: []client.Object{deplWithImage("k8s.gcr.io/coredns:1.6.7"), cm, kubeadmCM},
expectErr: false,
expectUpdates: true,
@@ -338,6 +392,7 @@ func TestUpdateCoreDNS(t *testing.T) {
migrator: &fakeMigrator{
migratedCorefile: "updated-core-file",
},
+ semver: semver1191,
objs: []client.Object{deplWithImage("k8s.gcr.io/coredns:1.7.0"), cm, kubeadmCM},
expectErr: false,
expectUpdates: false,
@@ -361,6 +416,7 @@ func TestUpdateCoreDNS(t *testing.T) {
migrator: &fakeMigrator{
migratedCorefile: "updated-core-file",
},
+ semver: semver1191,
objs: []client.Object{deplWithImage("k8s.gcr.io/coredns:1.7.0"), cm, kubeadmCM},
expectErr: false,
expectUpdates: true,
@@ -382,12 +438,61 @@ func TestUpdateCoreDNS(t *testing.T) {
},
},
},
+ semver: semver1191,
migrator: &fakeMigrator{
migratedCorefile: "updated-core-file",
},
objs: []client.Object{deplWithImage("k8s.gcr.io/coredns/coredns:v1.8.0"), cm, kubeadmCM},
expectErr: false,
expectUpdates: false,
+ expectRules: oldCR.Rules,
+ },
+ {
+ name: "upgrade from Kubernetes v1.21.x to v1.22.y and update cluster role",
+ kcp: &controlplanev1.KubeadmControlPlane{
+ Spec: controlplanev1.KubeadmControlPlaneSpec{
+ KubeadmConfigSpec: bootstrapv1.KubeadmConfigSpec{
+ ClusterConfiguration: &bootstrapv1.ClusterConfiguration{
+ DNS: bootstrapv1.DNS{
+ ImageMeta: bootstrapv1.ImageMeta{
+ ImageRepository: "k8s.gcr.io",
+ ImageTag: "v1.8.1", // NOTE: ImageTag requires the v prefix
+ },
+ },
+ },
+ },
+ },
+ },
+ migrator: &fakeMigrator{
+ migratedCorefile: "updated-core-file",
+ },
+ semver: semver1221,
+ objs: []client.Object{deplWithImage("k8s.gcr.io/coredns/coredns:v1.8.1"), updatedCM, kubeadmCM181, oldCR},
+ expectErr: false,
+ expectUpdates: true,
+ expectImage: "k8s.gcr.io/coredns/coredns:v1.8.1", // NOTE: ImageName has coredns/coredns
+ expectRules: coreDNS181PolicyRules,
+ },
+ {
+ name: "returns early without error if kubernetes version is >= v1.23",
+ kcp: &controlplanev1.KubeadmControlPlane{
+ Spec: controlplanev1.KubeadmControlPlaneSpec{
+ KubeadmConfigSpec: bootstrapv1.KubeadmConfigSpec{
+ ClusterConfiguration: &bootstrapv1.ClusterConfiguration{
+ DNS: bootstrapv1.DNS{
+ ImageMeta: bootstrapv1.ImageMeta{
+ ImageRepository: "k8s.gcr.io",
+ ImageTag: "v1.8.1", // NOTE: ImageTag requires the v prefix
+ },
+ },
+ },
+ },
+ },
+ },
+ semver: semver1230,
+ objs: []client.Object{deplWithImage("k8s.gcr.io/coredns/coredns:v1.8.1"), updatedCM, kubeadmCM181},
+ expectErr: false,
+ expectRules: oldCR.Rules,
},
}
@@ -412,7 +517,7 @@ func TestUpdateCoreDNS(t *testing.T) {
Client: env.GetClient(),
CoreDNSMigrator: tt.migrator,
}
- err := w.UpdateCoreDNS(ctx, tt.kcp, semver.MustParse("1.19.1"))
+ err := w.UpdateCoreDNS(ctx, tt.kcp, tt.semver)
if tt.expectErr {
g.Expect(err).To(HaveOccurred())
@@ -455,6 +560,15 @@ func TestUpdateCoreDNS(t *testing.T) {
g.Expect(env.Get(ctx, client.ObjectKey{Name: coreDNSKey, Namespace: metav1.NamespaceSystem}, &actualDeployment)).To(Succeed())
return actualDeployment.Spec.Template.Spec.Containers[0].Image
}, "5s").Should(Equal(tt.expectImage))
+
+ // assert CoreDNS ClusterRole
+ if tt.expectRules != nil {
+ var actualClusterRole rbacv1.ClusterRole
+ g.Eventually(func() []rbacv1.PolicyRule {
+ g.Expect(env.Get(ctx, client.ObjectKey{Name: coreDNSClusterRoleName}, &actualClusterRole)).To(Succeed())
+ return actualClusterRole.Rules
+ }, "5s").Should(Equal(tt.expectRules))
+ }
}
})
}
@@ -550,6 +664,7 @@ func TestUpdateCoreDNSClusterRole(t *testing.T) {
name string
kubernetesVersion semver.Version
coreDNSVersion string
+ sourceCoreDNSVersion string
coreDNSPolicyRules []rbacv1.PolicyRule
expectErr bool
expectCoreDNSPolicyRules []rbacv1.PolicyRule
@@ -568,10 +683,19 @@ func TestUpdateCoreDNSClusterRole(t *testing.T) {
coreDNSPolicyRules: coreDNS180PolicyRules,
expectCoreDNSPolicyRules: coreDNS180PolicyRules,
},
+ {
+ name: "does not patch ClusterRole: Kubernetes > 1.22 && CoreDNS >= 1.8.1",
+ kubernetesVersion: semver.Version{Major: 1, Minor: 23, Patch: 0},
+ coreDNSVersion: "1.8.1",
+ sourceCoreDNSVersion: "1.8.1",
+ coreDNSPolicyRules: coreDNS180PolicyRules,
+ expectCoreDNSPolicyRules: coreDNS180PolicyRules,
+ },
{
name: "does not patch ClusterRole: CoreDNS < 1.8.1",
kubernetesVersion: semver.Version{Major: 1, Minor: 22, Patch: 0},
coreDNSVersion: "1.8.0",
+ sourceCoreDNSVersion: "1.7.0",
coreDNSPolicyRules: coreDNS180PolicyRules,
expectCoreDNSPolicyRules: coreDNS180PolicyRules,
},
@@ -579,6 +703,7 @@ func TestUpdateCoreDNSClusterRole(t *testing.T) {
name: "patch ClusterRole: Kubernetes == 1.22 and CoreDNS == 1.8.1",
kubernetesVersion: semver.Version{Major: 1, Minor: 22, Patch: 0},
coreDNSVersion: "1.8.1",
+ sourceCoreDNSVersion: "1.8.1",
coreDNSPolicyRules: coreDNS180PolicyRules,
expectCoreDNSPolicyRules: coreDNS181PolicyRules,
},
@@ -586,6 +711,7 @@ func TestUpdateCoreDNSClusterRole(t *testing.T) {
name: "patch ClusterRole: Kubernetes > 1.22 and CoreDNS > 1.8.1",
kubernetesVersion: semver.Version{Major: 1, Minor: 22, Patch: 2},
coreDNSVersion: "1.8.5",
+ sourceCoreDNSVersion: "1.8.1",
coreDNSPolicyRules: coreDNS180PolicyRules,
expectCoreDNSPolicyRules: coreDNS181PolicyRules,
},
@@ -593,6 +719,7 @@ func TestUpdateCoreDNSClusterRole(t *testing.T) {
name: "patch ClusterRole: Kubernetes > 1.22 and CoreDNS > 1.8.1: no-op",
kubernetesVersion: semver.Version{Major: 1, Minor: 22, Patch: 2},
coreDNSVersion: "1.8.5",
+ sourceCoreDNSVersion: "1.8.5",
coreDNSPolicyRules: coreDNS181PolicyRules,
expectCoreDNSPolicyRules: coreDNS181PolicyRules,
},
@@ -615,7 +742,7 @@ func TestUpdateCoreDNSClusterRole(t *testing.T) {
Client: fakeClient,
}
- err := w.updateCoreDNSClusterRole(ctx, tt.kubernetesVersion, &coreDNSInfo{ToImageTag: tt.coreDNSVersion})
+ err := w.updateCoreDNSClusterRole(ctx, tt.kubernetesVersion, &coreDNSInfo{ToImageTag: tt.coreDNSVersion, FromImageTag: tt.sourceCoreDNSVersion})
if tt.expectErr {
g.Expect(err).To(HaveOccurred())
diff --git a/docs/book/src/clusterctl/commands/alpha-topology-plan.md b/docs/book/src/clusterctl/commands/alpha-topology-plan.md
index 3b28cec73388..ffa86a62150b 100644
--- a/docs/book/src/clusterctl/commands/alpha-topology-plan.md
+++ b/docs/book/src/clusterctl/commands/alpha-topology-plan.md
@@ -424,6 +424,17 @@ or validation on these objects. Defaulting and validation is only run on Cluster
+
+
### `--output-directory`, `-o` (REQUIRED)
Information about the objects that are created and updated is written to this directory.
diff --git a/docs/book/src/clusterctl/commands/init.md b/docs/book/src/clusterctl/commands/init.md
index 202a1eb05837..29311b04b35b 100644
--- a/docs/book/src/clusterctl/commands/init.md
+++ b/docs/book/src/clusterctl/commands/init.md
@@ -178,7 +178,7 @@ If this happens, there are no guarantees about the proper functioning of `cluste
Cluster API providers require a cert-manager version supporting the `cert-manager.io/v1` API to be installed in the cluster.
While doing init, clusterctl checks if there is a version of cert-manager already installed. If not, clusterctl will
-install a default version (currently cert-manager v1.5.3). See [clusterctl configuration](../configuration.md) for
+install a default version (currently cert-manager v1.7.2). See [clusterctl configuration](../configuration.md) for
available options to customize this operation.
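
If a different cert-manager release is needed, the paragraph above points to the clusterctl configuration file; a hedged sketch of such an override follows. The `cert-manager` entry and its `url`/`version`/`timeout` fields are recalled from the clusterctl configuration docs rather than taken from this patch, and the URL is a placeholder, so verify the exact keys in configuration.md before relying on them.

```yaml
# Sketch of an override in ~/.cluster-api/clusterctl.yaml (field names assumed; see configuration.md).
cert-manager:
  url: "https://example.com/cert-manager.yaml"   # placeholder location of a cert-manager manifest
  version: "v1.7.2"
  timeout: "15m"
```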