diff --git a/.golangci.yml b/.golangci.yml index a5226462ff..5d1e4260bf 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -1,7 +1,7 @@ version: "2" run: timeout: 10m - go: "1.23" + go: "1.24" allow-parallel-runners: true linters: default: none @@ -117,23 +117,30 @@ linters: - pkg: github.com/vmware-tanzu/nsx-operator/pkg/apis/vpc/v1alpha1 alias: nsxvpcv1 # CABPK - - pkg: sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1 + - pkg: sigs.k8s.io/cluster-api/api/bootstrap/kubeadm/v1beta2 alias: bootstrapv1 + - pkg: sigs.k8s.io/cluster-api/api/bootstrap/kubeadm/v1beta1 + alias: bootstrapv1beta1 # KCP - - pkg: sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1 + - pkg: sigs.k8s.io/cluster-api/api/controlplane/kubeadm/v1beta2 alias: controlplanev1 # CAPI - - pkg: sigs.k8s.io/cluster-api/api/v1beta1 + - pkg: sigs.k8s.io/cluster-api/api/core/v1beta2 alias: clusterv1 - # CAPI exp - - pkg: sigs.k8s.io/cluster-api/exp/api/v1beta1 - alias: expv1 - # CAPI exp addons - - pkg: sigs.k8s.io/cluster-api/api/addons/v1beta1 + - pkg: sigs.k8s.io/cluster-api/api/core/v1beta1 + alias: clusterv1beta1 + - pkg: sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions + alias: deprecatedconditions + - pkg: sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions/v1beta2 + alias: deprecatedv1beta2conditions + # CAPI addons + - pkg: sigs.k8s.io/cluster-api/api/addons/v1beta2 alias: addonsv1 # CAPI IPAM - - pkg: sigs.k8s.io/cluster-api/exp/ipam/api/v1beta1 + - pkg: sigs.k8s.io/cluster-api/api/ipam/v1beta2 alias: ipamv1 + - pkg: sigs.k8s.io/cluster-api/api/ipam/v1beta1 + alias: ipamv1beta1 nolintlint: require-specific: true allow-unused: false @@ -195,6 +202,22 @@ linters: - linters: - staticcheck text: 'SA1019: "sigs.k8s.io/cluster-api-provider-vsphere/apis/(v1alpha3|v1alpha4)" is deprecated: This package will be removed in one of the next releases.' + - linters: + - staticcheck + text: 'SA1019: "sigs.k8s.io/cluster-api/api/core/v1beta1" is deprecated: This package is deprecated and is going to be removed when support for v1beta1 will be dropped.' + # CAPV Govmomi aggregates from IPAddressClaims to v1beta1 conditions + - linters: + - staticcheck + text: 'SA1019: "sigs.k8s.io/cluster-api/api/ipam/v1beta1" is deprecated: This package is deprecated and is going to be removed when support for v1beta1 will be dropped.' + - linters: + - staticcheck + text: 'SA1019: "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/.*" is deprecated' + - linters: + - staticcheck + text: 'SA1019: corev1.Endpoints is deprecated: This API is deprecated in v1.33\+. Use discoveryv1.EndpointSlice.' + - linters: + - staticcheck + text: 'SA1019: corev1.EndpointSubset is deprecated: This API is deprecated in v1.33\+.' - linters: - revive text: 'exported: exported method .*\.(Reconcile|SetupWithManager|SetupWebhookWithManager) should have comment or be unexported' @@ -304,10 +327,6 @@ linters: - staticcheck path: ^(apis/(v1alpha3|v1alpha4)\/.*)\.go$ text: ST1021|ST1020 - # Ignore non-constant format string in call to condition utils - - linters: - - govet - text: non-constant format string in call to sigs\.k8s\.io\/cluster-api\/util\/conditions\. - linters: - goconst path: (.+)_test\.go diff --git a/Makefile b/Makefile index fddda87325..f2dfe21493 100644 --- a/Makefile +++ b/Makefile @@ -23,8 +23,8 @@ SHELL:=/usr/bin/env bash # # Go. 
# -GO_VERSION ?= 1.23.8 -GO_DIRECTIVE_VERSION ?= 1.23.0 +GO_VERSION ?= 1.24.3 +GO_DIRECTIVE_VERSION ?= 1.24.0 GO_CONTAINER_IMAGE ?= docker.io/library/golang:$(GO_VERSION) # Ensure correct toolchain is used @@ -113,12 +113,12 @@ KUSTOMIZE_BIN := kustomize KUSTOMIZE := $(abspath $(TOOLS_BIN_DIR)/$(KUSTOMIZE_BIN)-$(KUSTOMIZE_VER)) KUSTOMIZE_PKG := sigs.k8s.io/kustomize/kustomize/v4 -SETUP_ENVTEST_VER := release-0.20 +SETUP_ENVTEST_VER := release-0.21 SETUP_ENVTEST_BIN := setup-envtest SETUP_ENVTEST := $(abspath $(TOOLS_BIN_DIR)/$(SETUP_ENVTEST_BIN)-$(SETUP_ENVTEST_VER)) SETUP_ENVTEST_PKG := sigs.k8s.io/controller-runtime/tools/setup-envtest -CONTROLLER_GEN_VER := v0.17.0 +CONTROLLER_GEN_VER := v0.18.0 CONTROLLER_GEN_BIN := controller-gen CONTROLLER_GEN := $(abspath $(TOOLS_BIN_DIR)/$(CONTROLLER_GEN_BIN)-$(CONTROLLER_GEN_VER)) CONTROLLER_GEN_PKG := sigs.k8s.io/controller-tools/cmd/controller-gen @@ -128,7 +128,7 @@ GOTESTSUM_BIN := gotestsum GOTESTSUM := $(abspath $(TOOLS_BIN_DIR)/$(GOTESTSUM_BIN)-$(GOTESTSUM_VER)) GOTESTSUM_PKG := gotest.tools/gotestsum -CONVERSION_GEN_VER := v0.32.0 +CONVERSION_GEN_VER := v0.33.0 CONVERSION_GEN_BIN := conversion-gen # We are intentionally using the binary without version suffix, to avoid the version # in generated files. @@ -190,7 +190,7 @@ IMPORT_BOSS_VER := v0.28.1 IMPORT_BOSS := $(abspath $(TOOLS_BIN_DIR)/$(IMPORT_BOSS_BIN)) IMPORT_BOSS_PKG := k8s.io/code-generator/cmd/import-boss -CAPI_HACK_TOOLS_VER := 647a1b741bc86dd759583a44b50a3ba8dbd326c4 # Note: this the commit ID of CAPI v1.10.1 +CAPI_HACK_TOOLS_VER := 071d769879ff13ae358c82a6d267da97e13f05f6 # Note: this is the commit ID of CAPI on main at 19th May 2025 BOSKOSCTL_BIN := boskosctl BOSKOSCTL := $(abspath $(TOOLS_BIN_DIR)/$(BOSKOSCTL_BIN)) diff --git a/apis/v1alpha3/conversion_test.go b/apis/v1alpha3/conversion_test.go index 2bf5fc9d13..697a686276 100644 --- a/apis/v1alpha3/conversion_test.go +++ b/apis/v1alpha3/conversion_test.go @@ -19,12 +19,12 @@ package v1alpha3 import ( "testing" - fuzz "github.com/google/gofuzz" . 
"github.com/onsi/gomega" "k8s.io/apimachinery/pkg/api/apitesting/fuzzer" "k8s.io/apimachinery/pkg/runtime" runtimeserializer "k8s.io/apimachinery/pkg/runtime/serializer" utilconversion "sigs.k8s.io/cluster-api/util/conversion" + "sigs.k8s.io/randfill" infrav1 "sigs.k8s.io/cluster-api-provider-vsphere/apis/v1beta1" ) @@ -67,7 +67,7 @@ func TestFuzzyConversion(t *testing.T) { func overrideVSphereClusterDeprecatedFieldsFuncs(runtimeserializer.CodecFactory) []interface{} { return []interface{}{ - func(vsphereClusterSpec *VSphereClusterSpec, _ fuzz.Continue) { + func(vsphereClusterSpec *VSphereClusterSpec, _ randfill.Continue) { vsphereClusterSpec.CloudProviderConfiguration = CPIConfig{} }, } @@ -75,8 +75,8 @@ func overrideVSphereClusterDeprecatedFieldsFuncs(runtimeserializer.CodecFactory) func overrideVSphereClusterSpecFieldsFuncs(runtimeserializer.CodecFactory) []interface{} { return []interface{}{ - func(in *infrav1.VSphereClusterSpec, c fuzz.Continue) { - c.FuzzNoCustom(in) + func(in *infrav1.VSphereClusterSpec, c randfill.Continue) { + c.FillNoCustom(in) in.ClusterModules = nil in.FailureDomainSelector = nil in.DisableClusterModule = false @@ -86,8 +86,8 @@ func overrideVSphereClusterSpecFieldsFuncs(runtimeserializer.CodecFactory) []int func overrideVSphereClusterStatusFieldsFuncs(runtimeserializer.CodecFactory) []interface{} { return []interface{}{ - func(in *infrav1.VSphereClusterStatus, c fuzz.Continue) { - c.FuzzNoCustom(in) + func(in *infrav1.VSphereClusterStatus, c randfill.Continue) { + c.FillNoCustom(in) in.VCenterVersion = "" }, } @@ -99,8 +99,8 @@ func CustomObjectMetaFuzzFunc(runtimeserializer.CodecFactory) []interface{} { } } -func CustomObjectMetaFuzzer(in *ObjectMeta, c fuzz.Continue) { - c.FuzzNoCustom(in) +func CustomObjectMetaFuzzer(in *ObjectMeta, c randfill.Continue) { + c.FillNoCustom(in) // These fields have been removed in v1alpha4 // data is going to be lost, so we're forcing zero values here. 
@@ -117,8 +117,8 @@ func CustomNewFieldFuzzFunc(runtimeserializer.CodecFactory) []interface{} { } } -func CustomSpecNewFieldFuzzer(in *infrav1.VirtualMachineCloneSpec, c fuzz.Continue) { - c.FuzzNoCustom(in) +func CustomSpecNewFieldFuzzer(in *infrav1.VirtualMachineCloneSpec, c randfill.Continue) { + c.FillNoCustom(in) in.PciDevices = nil in.AdditionalDisksGiB = nil @@ -126,8 +126,8 @@ func CustomSpecNewFieldFuzzer(in *infrav1.VirtualMachineCloneSpec, c fuzz.Contin in.HardwareVersion = "" } -func CustomStatusNewFieldFuzzer(in *infrav1.VSphereVMStatus, c fuzz.Continue) { - c.FuzzNoCustom(in) +func CustomStatusNewFieldFuzzer(in *infrav1.VSphereVMStatus, c randfill.Continue) { + c.FillNoCustom(in) in.Host = "" in.ModuleUUID = nil diff --git a/apis/v1alpha3/vspheremachinetemplate_conversion.go b/apis/v1alpha3/vspheremachinetemplate_conversion.go index d0883c0a46..02e5a9d80b 100644 --- a/apis/v1alpha3/vspheremachinetemplate_conversion.go +++ b/apis/v1alpha3/vspheremachinetemplate_conversion.go @@ -20,7 +20,7 @@ import ( "unsafe" apiconversion "k8s.io/apimachinery/pkg/conversion" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" utilconversion "sigs.k8s.io/cluster-api/util/conversion" "sigs.k8s.io/controller-runtime/pkg/conversion" @@ -79,14 +79,14 @@ func (dst *VSphereMachineTemplateList) ConvertFrom(srcRaw conversion.Hub) error return Convert_v1beta1_VSphereMachineTemplateList_To_v1alpha3_VSphereMachineTemplateList(src, dst, nil) } -func Convert_v1alpha3_ObjectMeta_To_v1beta1_ObjectMeta(in *ObjectMeta, out *clusterv1.ObjectMeta, s apiconversion.Scope) error { +func Convert_v1alpha3_ObjectMeta_To_v1beta1_ObjectMeta(in *ObjectMeta, out *clusterv1beta1.ObjectMeta, s apiconversion.Scope) error { // wrapping the conversion func to avoid having compile errors due to compileErrorOnMissingConversion() // more details at https://github.com/kubernetes/kubernetes/issues/98380 return autoConvert_v1alpha3_ObjectMeta_To_v1beta1_ObjectMeta(in, out, s) } // autoConvert_v1alpha3_ObjectMeta_To_v1beta1_ObjectMeta was copied over from CAPI because it is now internal there. -func autoConvert_v1alpha3_ObjectMeta_To_v1beta1_ObjectMeta(in *ObjectMeta, out *clusterv1.ObjectMeta, s apiconversion.Scope) error { +func autoConvert_v1alpha3_ObjectMeta_To_v1beta1_ObjectMeta(in *ObjectMeta, out *clusterv1beta1.ObjectMeta, s apiconversion.Scope) error { // WARNING: in.Name requires manual conversion: does not exist in peer-type // WARNING: in.GenerateName requires manual conversion: does not exist in peer-type // WARNING: in.Namespace requires manual conversion: does not exist in peer-type @@ -96,14 +96,14 @@ func autoConvert_v1alpha3_ObjectMeta_To_v1beta1_ObjectMeta(in *ObjectMeta, out * return nil } -func Convert_v1beta1_ObjectMeta_To_v1alpha3_ObjectMeta(in *clusterv1.ObjectMeta, out *ObjectMeta, s apiconversion.Scope) error { +func Convert_v1beta1_ObjectMeta_To_v1alpha3_ObjectMeta(in *clusterv1beta1.ObjectMeta, out *ObjectMeta, s apiconversion.Scope) error { // wrapping the conversion func to avoid having compile errors due to compileErrorOnMissingConversion() // more details at https://github.com/kubernetes/kubernetes/issues/98380 return autoConvert_v1beta1_ObjectMeta_To_v1alpha3_ObjectMeta(in, out, s) } // autoConvert_v1beta1_ObjectMeta_To_v1alpha3_ObjectMeta was copied over from CAPI because it is now internal there. 
-func autoConvert_v1beta1_ObjectMeta_To_v1alpha3_ObjectMeta(in *clusterv1.ObjectMeta, out *ObjectMeta, s apiconversion.Scope) error { +func autoConvert_v1beta1_ObjectMeta_To_v1alpha3_ObjectMeta(in *clusterv1beta1.ObjectMeta, out *ObjectMeta, s apiconversion.Scope) error { out.Labels = *(*map[string]string)(unsafe.Pointer(&in.Labels)) out.Annotations = *(*map[string]string)(unsafe.Pointer(&in.Annotations)) return nil diff --git a/apis/v1alpha3/zz_generated.conversion.go b/apis/v1alpha3/zz_generated.conversion.go index 8b39fd137f..489f69d30f 100644 --- a/apis/v1alpha3/zz_generated.conversion.go +++ b/apis/v1alpha3/zz_generated.conversion.go @@ -29,7 +29,7 @@ import ( conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" v1beta1 "sigs.k8s.io/cluster-api-provider-vsphere/apis/v1beta1" - apiv1beta1 "sigs.k8s.io/cluster-api/api/v1beta1" + corev1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" errors "sigs.k8s.io/cluster-api/errors" ) @@ -420,8 +420,8 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddConversionFunc((*ObjectMeta)(nil), (*apiv1beta1.ObjectMeta)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha3_ObjectMeta_To_v1beta1_ObjectMeta(a.(*ObjectMeta), b.(*apiv1beta1.ObjectMeta), scope) + if err := s.AddConversionFunc((*ObjectMeta)(nil), (*corev1beta1.ObjectMeta)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha3_ObjectMeta_To_v1beta1_ObjectMeta(a.(*ObjectMeta), b.(*corev1beta1.ObjectMeta), scope) }); err != nil { return err } @@ -435,8 +435,8 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddConversionFunc((*apiv1beta1.ObjectMeta)(nil), (*ObjectMeta)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_ObjectMeta_To_v1alpha3_ObjectMeta(a.(*apiv1beta1.ObjectMeta), b.(*ObjectMeta), scope) + if err := s.AddConversionFunc((*corev1beta1.ObjectMeta)(nil), (*ObjectMeta)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_ObjectMeta_To_v1alpha3_ObjectMeta(a.(*corev1beta1.ObjectMeta), b.(*ObjectMeta), scope) }); err != nil { return err } @@ -994,7 +994,7 @@ func Convert_v1beta1_VSphereClusterIdentitySpec_To_v1alpha3_VSphereClusterIdenti func autoConvert_v1alpha3_VSphereClusterIdentityStatus_To_v1beta1_VSphereClusterIdentityStatus(in *VSphereClusterIdentityStatus, out *v1beta1.VSphereClusterIdentityStatus, s conversion.Scope) error { out.Ready = in.Ready - out.Conditions = *(*apiv1beta1.Conditions)(unsafe.Pointer(&in.Conditions)) + out.Conditions = *(*corev1beta1.Conditions)(unsafe.Pointer(&in.Conditions)) return nil } @@ -1080,8 +1080,8 @@ func autoConvert_v1beta1_VSphereClusterSpec_To_v1alpha3_VSphereClusterSpec(in *v func autoConvert_v1alpha3_VSphereClusterStatus_To_v1beta1_VSphereClusterStatus(in *VSphereClusterStatus, out *v1beta1.VSphereClusterStatus, s conversion.Scope) error { out.Ready = in.Ready - out.Conditions = *(*apiv1beta1.Conditions)(unsafe.Pointer(&in.Conditions)) - out.FailureDomains = *(*apiv1beta1.FailureDomains)(unsafe.Pointer(&in.FailureDomains)) + out.Conditions = *(*corev1beta1.Conditions)(unsafe.Pointer(&in.Conditions)) + out.FailureDomains = *(*corev1beta1.FailureDomains)(unsafe.Pointer(&in.FailureDomains)) return nil } @@ -1205,7 +1205,7 @@ func Convert_v1beta1_VSphereDeploymentZoneSpec_To_v1alpha3_VSphereDeploymentZone func 
autoConvert_v1alpha3_VSphereDeploymentZoneStatus_To_v1beta1_VSphereDeploymentZoneStatus(in *VSphereDeploymentZoneStatus, out *v1beta1.VSphereDeploymentZoneStatus, s conversion.Scope) error { out.Ready = (*bool)(unsafe.Pointer(in.Ready)) - out.Conditions = *(*apiv1beta1.Conditions)(unsafe.Pointer(&in.Conditions)) + out.Conditions = *(*corev1beta1.Conditions)(unsafe.Pointer(&in.Conditions)) return nil } @@ -1449,11 +1449,11 @@ func autoConvert_v1beta1_VSphereMachineSpec_To_v1alpha3_VSphereMachineSpec(in *v func autoConvert_v1alpha3_VSphereMachineStatus_To_v1beta1_VSphereMachineStatus(in *VSphereMachineStatus, out *v1beta1.VSphereMachineStatus, s conversion.Scope) error { out.Ready = in.Ready - out.Addresses = *(*[]apiv1beta1.MachineAddress)(unsafe.Pointer(&in.Addresses)) + out.Addresses = *(*[]corev1beta1.MachineAddress)(unsafe.Pointer(&in.Addresses)) out.Network = *(*[]v1beta1.NetworkStatus)(unsafe.Pointer(&in.Network)) out.FailureReason = (*errors.MachineStatusError)(unsafe.Pointer(in.FailureReason)) out.FailureMessage = (*string)(unsafe.Pointer(in.FailureMessage)) - out.Conditions = *(*apiv1beta1.Conditions)(unsafe.Pointer(&in.Conditions)) + out.Conditions = *(*corev1beta1.Conditions)(unsafe.Pointer(&in.Conditions)) return nil } @@ -1704,7 +1704,7 @@ func autoConvert_v1alpha3_VSphereVMStatus_To_v1beta1_VSphereVMStatus(in *VSphere out.Network = *(*[]v1beta1.NetworkStatus)(unsafe.Pointer(&in.Network)) out.FailureReason = (*errors.MachineStatusError)(unsafe.Pointer(in.FailureReason)) out.FailureMessage = (*string)(unsafe.Pointer(in.FailureMessage)) - out.Conditions = *(*apiv1beta1.Conditions)(unsafe.Pointer(&in.Conditions)) + out.Conditions = *(*corev1beta1.Conditions)(unsafe.Pointer(&in.Conditions)) return nil } diff --git a/apis/v1alpha4/conversion_test.go b/apis/v1alpha4/conversion_test.go index ee61c1d959..147defd98d 100644 --- a/apis/v1alpha4/conversion_test.go +++ b/apis/v1alpha4/conversion_test.go @@ -19,12 +19,12 @@ package v1alpha4 import ( "testing" - fuzz "github.com/google/gofuzz" . 
"github.com/onsi/gomega" "k8s.io/apimachinery/pkg/api/apitesting/fuzzer" "k8s.io/apimachinery/pkg/runtime" runtimeserializer "k8s.io/apimachinery/pkg/runtime/serializer" utilconversion "sigs.k8s.io/cluster-api/util/conversion" + "sigs.k8s.io/randfill" infrav1 "sigs.k8s.io/cluster-api-provider-vsphere/apis/v1beta1" ) @@ -66,8 +66,8 @@ func TestFuzzyConversion(t *testing.T) { func overrideVSphereClusterSpecFieldsFuncs(runtimeserializer.CodecFactory) []interface{} { return []interface{}{ - func(in *infrav1.VSphereClusterSpec, c fuzz.Continue) { - c.FuzzNoCustom(in) + func(in *infrav1.VSphereClusterSpec, c randfill.Continue) { + c.FillNoCustom(in) in.ClusterModules = nil in.FailureDomainSelector = nil in.DisableClusterModule = false @@ -77,8 +77,8 @@ func overrideVSphereClusterSpecFieldsFuncs(runtimeserializer.CodecFactory) []int func overrideVSphereClusterStatusFieldsFuncs(runtimeserializer.CodecFactory) []interface{} { return []interface{}{ - func(in *infrav1.VSphereClusterStatus, c fuzz.Continue) { - c.FuzzNoCustom(in) + func(in *infrav1.VSphereClusterStatus, c randfill.Continue) { + c.FillNoCustom(in) in.VCenterVersion = "" }, } @@ -91,8 +91,8 @@ func CustomNewFieldFuzzFunc(runtimeserializer.CodecFactory) []interface{} { } } -func CustomSpecNewFieldFuzzer(in *infrav1.VirtualMachineCloneSpec, c fuzz.Continue) { - c.FuzzNoCustom(in) +func CustomSpecNewFieldFuzzer(in *infrav1.VirtualMachineCloneSpec, c randfill.Continue) { + c.FillNoCustom(in) in.PciDevices = nil in.AdditionalDisksGiB = nil @@ -100,8 +100,8 @@ func CustomSpecNewFieldFuzzer(in *infrav1.VirtualMachineCloneSpec, c fuzz.Contin in.HardwareVersion = "" } -func CustomStatusNewFieldFuzzer(in *infrav1.VSphereVMStatus, c fuzz.Continue) { - c.FuzzNoCustom(in) +func CustomStatusNewFieldFuzzer(in *infrav1.VSphereVMStatus, c randfill.Continue) { + c.FillNoCustom(in) in.Host = "" in.ModuleUUID = nil diff --git a/apis/v1alpha4/vspheremachinetemplate_conversion.go b/apis/v1alpha4/vspheremachinetemplate_conversion.go index f9cb898efe..6d94e1f5f8 100644 --- a/apis/v1alpha4/vspheremachinetemplate_conversion.go +++ b/apis/v1alpha4/vspheremachinetemplate_conversion.go @@ -20,7 +20,7 @@ import ( "unsafe" apiconversion "k8s.io/apimachinery/pkg/conversion" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" utilconversion "sigs.k8s.io/cluster-api/util/conversion" "sigs.k8s.io/controller-runtime/pkg/conversion" @@ -80,27 +80,27 @@ func (dst *VSphereMachineTemplateList) ConvertFrom(srcRaw conversion.Hub) error return Convert_v1beta1_VSphereMachineTemplateList_To_v1alpha4_VSphereMachineTemplateList(src, dst, nil) } -func Convert_v1alpha4_ObjectMeta_To_v1beta1_ObjectMeta(in *ObjectMeta, out *clusterv1.ObjectMeta, s apiconversion.Scope) error { +func Convert_v1alpha4_ObjectMeta_To_v1beta1_ObjectMeta(in *ObjectMeta, out *clusterv1beta1.ObjectMeta, s apiconversion.Scope) error { // wrapping the conversion func to avoid having compile errors due to compileErrorOnMissingConversion() // more details at https://github.com/kubernetes/kubernetes/issues/98380 return autoConvert_v1alpha4_ObjectMeta_To_v1beta1_ObjectMeta(in, out, s) } // autoConvert_v1alpha4_ObjectMeta_To_v1beta1_ObjectMeta was copied over from CAPI because it is now internal there. 
-func autoConvert_v1alpha4_ObjectMeta_To_v1beta1_ObjectMeta(in *ObjectMeta, out *clusterv1.ObjectMeta, s apiconversion.Scope) error { +func autoConvert_v1alpha4_ObjectMeta_To_v1beta1_ObjectMeta(in *ObjectMeta, out *clusterv1beta1.ObjectMeta, s apiconversion.Scope) error { out.Labels = *(*map[string]string)(unsafe.Pointer(&in.Labels)) out.Annotations = *(*map[string]string)(unsafe.Pointer(&in.Annotations)) return nil } -func Convert_v1beta1_ObjectMeta_To_v1alpha4_ObjectMeta(in *clusterv1.ObjectMeta, out *ObjectMeta, s apiconversion.Scope) error { +func Convert_v1beta1_ObjectMeta_To_v1alpha4_ObjectMeta(in *clusterv1beta1.ObjectMeta, out *ObjectMeta, s apiconversion.Scope) error { // wrapping the conversion func to avoid having compile errors due to compileErrorOnMissingConversion() // more details at https://github.com/kubernetes/kubernetes/issues/98380 return autoConvert_v1beta1_ObjectMeta_To_v1alpha4_ObjectMeta(in, out, s) } // autoConvert_v1beta1_ObjectMeta_To_v1alpha4_ObjectMeta was copied over from CAPI because it is now internal there. -func autoConvert_v1beta1_ObjectMeta_To_v1alpha4_ObjectMeta(in *clusterv1.ObjectMeta, out *ObjectMeta, s apiconversion.Scope) error { +func autoConvert_v1beta1_ObjectMeta_To_v1alpha4_ObjectMeta(in *clusterv1beta1.ObjectMeta, out *ObjectMeta, s apiconversion.Scope) error { out.Labels = *(*map[string]string)(unsafe.Pointer(&in.Labels)) out.Annotations = *(*map[string]string)(unsafe.Pointer(&in.Annotations)) return nil diff --git a/apis/v1alpha4/zz_generated.conversion.go b/apis/v1alpha4/zz_generated.conversion.go index 2e644a16e8..fd9ddcf91e 100644 --- a/apis/v1alpha4/zz_generated.conversion.go +++ b/apis/v1alpha4/zz_generated.conversion.go @@ -29,7 +29,7 @@ import ( conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" v1beta1 "sigs.k8s.io/cluster-api-provider-vsphere/apis/v1beta1" - apiv1beta1 "sigs.k8s.io/cluster-api/api/v1beta1" + corev1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" errors "sigs.k8s.io/cluster-api/errors" ) @@ -465,8 +465,8 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddConversionFunc((*ObjectMeta)(nil), (*apiv1beta1.ObjectMeta)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha4_ObjectMeta_To_v1beta1_ObjectMeta(a.(*ObjectMeta), b.(*apiv1beta1.ObjectMeta), scope) + if err := s.AddConversionFunc((*ObjectMeta)(nil), (*corev1beta1.ObjectMeta)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha4_ObjectMeta_To_v1beta1_ObjectMeta(a.(*ObjectMeta), b.(*corev1beta1.ObjectMeta), scope) }); err != nil { return err } @@ -475,8 +475,8 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddConversionFunc((*apiv1beta1.ObjectMeta)(nil), (*ObjectMeta)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_ObjectMeta_To_v1alpha4_ObjectMeta(a.(*apiv1beta1.ObjectMeta), b.(*ObjectMeta), scope) + if err := s.AddConversionFunc((*corev1beta1.ObjectMeta)(nil), (*ObjectMeta)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_ObjectMeta_To_v1alpha4_ObjectMeta(a.(*corev1beta1.ObjectMeta), b.(*ObjectMeta), scope) }); err != nil { return err } @@ -1030,7 +1030,7 @@ func Convert_v1beta1_VSphereClusterIdentitySpec_To_v1alpha4_VSphereClusterIdenti func autoConvert_v1alpha4_VSphereClusterIdentityStatus_To_v1beta1_VSphereClusterIdentityStatus(in *VSphereClusterIdentityStatus, out 
*v1beta1.VSphereClusterIdentityStatus, s conversion.Scope) error { out.Ready = in.Ready - out.Conditions = *(*apiv1beta1.Conditions)(unsafe.Pointer(&in.Conditions)) + out.Conditions = *(*corev1beta1.Conditions)(unsafe.Pointer(&in.Conditions)) return nil } @@ -1118,8 +1118,8 @@ func autoConvert_v1beta1_VSphereClusterSpec_To_v1alpha4_VSphereClusterSpec(in *v func autoConvert_v1alpha4_VSphereClusterStatus_To_v1beta1_VSphereClusterStatus(in *VSphereClusterStatus, out *v1beta1.VSphereClusterStatus, s conversion.Scope) error { out.Ready = in.Ready - out.Conditions = *(*apiv1beta1.Conditions)(unsafe.Pointer(&in.Conditions)) - out.FailureDomains = *(*apiv1beta1.FailureDomains)(unsafe.Pointer(&in.FailureDomains)) + out.Conditions = *(*corev1beta1.Conditions)(unsafe.Pointer(&in.Conditions)) + out.FailureDomains = *(*corev1beta1.FailureDomains)(unsafe.Pointer(&in.FailureDomains)) return nil } @@ -1359,7 +1359,7 @@ func Convert_v1beta1_VSphereDeploymentZoneSpec_To_v1alpha4_VSphereDeploymentZone func autoConvert_v1alpha4_VSphereDeploymentZoneStatus_To_v1beta1_VSphereDeploymentZoneStatus(in *VSphereDeploymentZoneStatus, out *v1beta1.VSphereDeploymentZoneStatus, s conversion.Scope) error { out.Ready = (*bool)(unsafe.Pointer(in.Ready)) - out.Conditions = *(*apiv1beta1.Conditions)(unsafe.Pointer(&in.Conditions)) + out.Conditions = *(*corev1beta1.Conditions)(unsafe.Pointer(&in.Conditions)) return nil } @@ -1603,11 +1603,11 @@ func autoConvert_v1beta1_VSphereMachineSpec_To_v1alpha4_VSphereMachineSpec(in *v func autoConvert_v1alpha4_VSphereMachineStatus_To_v1beta1_VSphereMachineStatus(in *VSphereMachineStatus, out *v1beta1.VSphereMachineStatus, s conversion.Scope) error { out.Ready = in.Ready - out.Addresses = *(*[]apiv1beta1.MachineAddress)(unsafe.Pointer(&in.Addresses)) + out.Addresses = *(*[]corev1beta1.MachineAddress)(unsafe.Pointer(&in.Addresses)) out.Network = *(*[]v1beta1.NetworkStatus)(unsafe.Pointer(&in.Network)) out.FailureReason = (*errors.MachineStatusError)(unsafe.Pointer(in.FailureReason)) out.FailureMessage = (*string)(unsafe.Pointer(in.FailureMessage)) - out.Conditions = *(*apiv1beta1.Conditions)(unsafe.Pointer(&in.Conditions)) + out.Conditions = *(*corev1beta1.Conditions)(unsafe.Pointer(&in.Conditions)) return nil } @@ -1858,7 +1858,7 @@ func autoConvert_v1alpha4_VSphereVMStatus_To_v1beta1_VSphereVMStatus(in *VSphere out.Network = *(*[]v1beta1.NetworkStatus)(unsafe.Pointer(&in.Network)) out.FailureReason = (*errors.MachineStatusError)(unsafe.Pointer(in.FailureReason)) out.FailureMessage = (*string)(unsafe.Pointer(in.FailureMessage)) - out.Conditions = *(*apiv1beta1.Conditions)(unsafe.Pointer(&in.Conditions)) + out.Conditions = *(*corev1beta1.Conditions)(unsafe.Pointer(&in.Conditions)) return nil } diff --git a/apis/v1beta1/condition_consts.go b/apis/v1beta1/condition_consts.go index cfefad2750..5877a45893 100644 --- a/apis/v1beta1/condition_consts.go +++ b/apis/v1beta1/condition_consts.go @@ -16,14 +16,14 @@ limitations under the License. package v1beta1 -import clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" +import clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" // Conditions and condition Reasons for the VSphereCluster object. const ( // FailureDomainsAvailableCondition documents the status of the failure domains // associated to the VSphereCluster. 
- FailureDomainsAvailableCondition clusterv1.ConditionType = "FailureDomainsAvailable" + FailureDomainsAvailableCondition clusterv1beta1.ConditionType = "FailureDomainsAvailable" // FailureDomainsSkippedReason (Severity=Info) documents that some of the failure domain statuses // associated to the VSphereCluster are reported as not ready. @@ -43,7 +43,7 @@ const ( const ( // VMProvisionedCondition documents the status of the provisioning of a VSphereMachine and its underlying VSphereVM. - VMProvisionedCondition clusterv1.ConditionType = "VMProvisioned" + VMProvisionedCondition clusterv1beta1.ConditionType = "VMProvisioned" // WaitingForClusterInfrastructureReason (Severity=Info) documents a VSphereMachine waiting for the cluster // infrastructure to be ready before starting the provisioning process. @@ -106,7 +106,7 @@ const ( // the underlying VM and would require manual intervention to fix the situation. // // NOTE: This condition does not apply to VSphereMachine. - PCIDevicesDetachedCondition clusterv1.ConditionType = "PCIDevicesDetached" + PCIDevicesDetachedCondition clusterv1beta1.ConditionType = "PCIDevicesDetached" // NotFoundReason (Severity=Warning) documents the VSphereVM not having the PCI device attached during VM startup. // This would indicate that the PCI devices were removed out of band by an external entity. @@ -118,7 +118,7 @@ const ( const ( // VCenterAvailableCondition documents the connectivity with vcenter // for a given resource. - VCenterAvailableCondition clusterv1.ConditionType = "VCenterAvailable" + VCenterAvailableCondition clusterv1beta1.ConditionType = "VCenterAvailable" // VCenterUnreachableReason (Severity=Error) documents a controller detecting // issues with VCenter reachability. @@ -127,7 +127,7 @@ const ( const ( // ClusterModulesAvailableCondition documents the availability of cluster modules for the VSphereCluster object. - ClusterModulesAvailableCondition clusterv1.ConditionType = "ClusterModulesAvailable" + ClusterModulesAvailableCondition clusterv1beta1.ConditionType = "ClusterModulesAvailable" // MissingVCenterVersionReason (Severity=Warning) documents a controller detecting // the scenario in which the vCenter version is not set in the status of the VSphereCluster object. @@ -146,7 +146,7 @@ const ( const ( // CredentialsAvailableCondidtion is used by VSphereClusterIdentity when a credential // secret is available and unused by other VSphereClusterIdentities. - CredentialsAvailableCondidtion clusterv1.ConditionType = "CredentialsAvailable" + CredentialsAvailableCondidtion clusterv1beta1.ConditionType = "CredentialsAvailable" // SecretNotAvailableReason is used when the secret referenced by the VSphereClusterIdentity cannot be found. SecretNotAvailableReason = "SecretNotAvailable" @@ -160,7 +160,7 @@ const ( const ( // PlacementConstraintMetCondition documents whether the placement constraint is configured correctly or not. - PlacementConstraintMetCondition clusterv1.ConditionType = "PlacementConstraintMet" + PlacementConstraintMetCondition clusterv1beta1.ConditionType = "PlacementConstraintMet" // ResourcePoolNotFoundReason (Severity=Error) documents that the resource pool in the placement constraint // associated to the VSphereDeploymentZone is misconfigured. @@ -173,7 +173,7 @@ const ( const ( // VSphereFailureDomainValidatedCondition documents whether the failure domain for the deployment zone is configured correctly or not. 
- VSphereFailureDomainValidatedCondition clusterv1.ConditionType = "VSphereFailureDomainValidated" + VSphereFailureDomainValidatedCondition clusterv1beta1.ConditionType = "VSphereFailureDomainValidated" // RegionMisconfiguredReason (Severity=Error) documents that the region for the Failure Domain associated to // the VSphereDeploymentZone is misconfigured. @@ -206,7 +206,7 @@ const ( const ( // IPAddressClaimedCondition documents the status of claiming an IP address // from an IPAM provider. - IPAddressClaimedCondition clusterv1.ConditionType = "IPAddressClaimed" + IPAddressClaimedCondition clusterv1beta1.ConditionType = "IPAddressClaimed" // IPAddressClaimsBeingCreatedReason (Severity=Info) documents that claims for the // IP addresses required by the VSphereVM are being created. @@ -228,7 +228,7 @@ const ( const ( // GuestSoftPowerOffSucceededCondition documents the status of performing guest initiated // graceful shutdown. - GuestSoftPowerOffSucceededCondition clusterv1.ConditionType = "GuestSoftPowerOffSucceeded" + GuestSoftPowerOffSucceededCondition clusterv1beta1.ConditionType = "GuestSoftPowerOffSucceeded" // GuestSoftPowerOffInProgressReason (Severity=Info) documents that the guest receives // a graceful shutdown request. diff --git a/apis/v1beta1/types.go b/apis/v1beta1/types.go index 4652d201dd..b0d995d529 100644 --- a/apis/v1beta1/types.go +++ b/apis/v1beta1/types.go @@ -20,7 +20,7 @@ import ( "fmt" corev1 "k8s.io/api/core/v1" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) const ( @@ -250,7 +250,7 @@ type VSphereMachineTemplateResource struct { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - ObjectMeta clusterv1.ObjectMeta `json:"metadata,omitempty"` + ObjectMeta clusterv1beta1.ObjectMeta `json:"metadata,omitempty"` // Spec is the specification of the desired behavior of the machine. Spec VSphereMachineSpec `json:"spec"` diff --git a/apis/v1beta1/vspherecluster_types.go b/apis/v1beta1/vspherecluster_types.go index 4dccb51952..20186a02a9 100644 --- a/apis/v1beta1/vspherecluster_types.go +++ b/apis/v1beta1/vspherecluster_types.go @@ -18,7 +18,7 @@ package v1beta1 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) const ( @@ -32,17 +32,17 @@ const ( const ( // VSphereClusterReadyV1Beta2Condition is true if the VSphereCluster's deletionTimestamp is not set, VSphereCluster's // FailureDomainsReady, VCenterAvailable and ClusterModulesReady conditions are true. - VSphereClusterReadyV1Beta2Condition = clusterv1.ReadyV1Beta2Condition + VSphereClusterReadyV1Beta2Condition = clusterv1beta1.ReadyV1Beta2Condition // VSphereClusterReadyV1Beta2Reason surfaces when the VSphereCluster readiness criteria is met. - VSphereClusterReadyV1Beta2Reason = clusterv1.ReadyV1Beta2Reason + VSphereClusterReadyV1Beta2Reason = clusterv1beta1.ReadyV1Beta2Reason // VSphereClusterNotReadyV1Beta2Reason surfaces when the VSphereCluster readiness criteria is not met. - VSphereClusterNotReadyV1Beta2Reason = clusterv1.NotReadyV1Beta2Reason + VSphereClusterNotReadyV1Beta2Reason = clusterv1beta1.NotReadyV1Beta2Reason // VSphereClusterReadyUnknownV1Beta2Reason surfaces when at least one VSphereCluster readiness criteria is unknown // and no VSphereCluster readiness criteria is not met. 
- VSphereClusterReadyUnknownV1Beta2Reason = clusterv1.ReadyUnknownV1Beta2Reason + VSphereClusterReadyUnknownV1Beta2Reason = clusterv1beta1.ReadyUnknownV1Beta2Reason ) // VSphereCluster's FailureDomainsReady condition and corresponding reasons that will be used in v1Beta2 API version. @@ -51,16 +51,16 @@ const ( VSphereClusterFailureDomainsReadyV1Beta2Condition = "FailureDomainsReady" // VSphereClusterFailureDomainsReadyV1Beta2Reason surfaces when the failure domains for a VSphereCluster are ready. - VSphereClusterFailureDomainsReadyV1Beta2Reason = clusterv1.ReadyV1Beta2Reason + VSphereClusterFailureDomainsReadyV1Beta2Reason = clusterv1beta1.ReadyV1Beta2Reason // VSphereClusterFailureDomainsWaitingForFailureDomainStatusV1Beta2Reason surfaces when not all VSphereFailureDomains for a VSphereCluster are ready. VSphereClusterFailureDomainsWaitingForFailureDomainStatusV1Beta2Reason = "WaitingForFailureDomainStatus" // VSphereClusterFailureDomainsNotReadyV1Beta2Reason surfaces when the failure domains for a VSphereCluster are not ready. - VSphereClusterFailureDomainsNotReadyV1Beta2Reason = clusterv1.NotReadyV1Beta2Reason + VSphereClusterFailureDomainsNotReadyV1Beta2Reason = clusterv1beta1.NotReadyV1Beta2Reason // VSphereClusterFailureDomainsDeletingV1Beta2Reason surfaces when the failure domains for a VSphereCluster are being deleted. - VSphereClusterFailureDomainsDeletingV1Beta2Reason = clusterv1.DeletingV1Beta2Reason + VSphereClusterFailureDomainsDeletingV1Beta2Reason = clusterv1beta1.DeletingV1Beta2Reason ) // VSphereCluster's VCenterAvailable condition and corresponding reasons that will be used in v1Beta2 API version. @@ -69,13 +69,13 @@ const ( VSphereClusterVCenterAvailableV1Beta2Condition = "VCenterAvailable" // VSphereClusterVCenterAvailableV1Beta2Reason surfaces when the vCenter for a VSphereCluster is available. - VSphereClusterVCenterAvailableV1Beta2Reason = clusterv1.AvailableV1Beta2Reason + VSphereClusterVCenterAvailableV1Beta2Reason = clusterv1beta1.AvailableV1Beta2Reason // VSphereClusterVCenterUnreachableV1Beta2Reason surfaces when the vCenter for a VSphereCluster is unreachable. VSphereClusterVCenterUnreachableV1Beta2Reason = "VCenterUnreachable" // VSphereClusterVCenterAvailableDeletingV1Beta2Reason surfaces when the VSphereCluster is being deleted. - VSphereClusterVCenterAvailableDeletingV1Beta2Reason = clusterv1.DeletingV1Beta2Reason + VSphereClusterVCenterAvailableDeletingV1Beta2Reason = clusterv1beta1.DeletingV1Beta2Reason ) // VSphereCluster's ClusterModulesReady condition and corresponding reasons that will be used in v1Beta2 API version. @@ -84,17 +84,17 @@ const ( VSphereClusterClusterModulesReadyV1Beta2Condition = "ClusterModulesReady" // VSphereClusterClusterModulesReadyV1Beta2Reason surfaces when the cluster modules for a VSphereCluster are ready. - VSphereClusterClusterModulesReadyV1Beta2Reason = clusterv1.ReadyV1Beta2Reason + VSphereClusterClusterModulesReadyV1Beta2Reason = clusterv1beta1.ReadyV1Beta2Reason // VSphereClusterModulesInvalidVCenterVersionV1Beta2Reason surfaces when the cluster modules for a VSphereCluster can't be reconciled // due to an invalid vCenter version. VSphereClusterModulesInvalidVCenterVersionV1Beta2Reason = "InvalidVCenterVersion" // VSphereClusterClusterModulesNotReadyV1Beta2Reason surfaces when the cluster modules for a VSphereCluster are not ready. 
- VSphereClusterClusterModulesNotReadyV1Beta2Reason = clusterv1.NotReadyV1Beta2Reason + VSphereClusterClusterModulesNotReadyV1Beta2Reason = clusterv1beta1.NotReadyV1Beta2Reason // VSphereClusterClusterModulesDeletingV1Beta2Reason surfaces when the cluster modules for a VSphereCluster are being deleted. - VSphereClusterClusterModulesDeletingV1Beta2Reason = clusterv1.DeletingV1Beta2Reason + VSphereClusterClusterModulesDeletingV1Beta2Reason = clusterv1beta1.DeletingV1Beta2Reason ) // VCenterVersion conveys the API version of the vCenter instance. @@ -166,10 +166,10 @@ type VSphereClusterStatus struct { // Conditions defines current service state of the VSphereCluster. // +optional - Conditions clusterv1.Conditions `json:"conditions,omitempty"` + Conditions clusterv1beta1.Conditions `json:"conditions,omitempty"` // FailureDomains is a list of failure domain objects synced from the infrastructure provider. - FailureDomains clusterv1.FailureDomains `json:"failureDomains,omitempty"` + FailureDomains clusterv1beta1.FailureDomains `json:"failureDomains,omitempty"` // VCenterVersion defines the version of the vCenter server defined in the spec. VCenterVersion VCenterVersion `json:"vCenterVersion,omitempty"` @@ -210,12 +210,12 @@ type VSphereCluster struct { } // GetConditions returns the conditions for the VSphereCluster. -func (c *VSphereCluster) GetConditions() clusterv1.Conditions { +func (c *VSphereCluster) GetConditions() clusterv1beta1.Conditions { return c.Status.Conditions } // SetConditions sets conditions on the VSphereCluster. -func (c *VSphereCluster) SetConditions(conditions clusterv1.Conditions) { +func (c *VSphereCluster) SetConditions(conditions clusterv1beta1.Conditions) { c.Status.Conditions = conditions } diff --git a/apis/v1beta1/vsphereclusteridentity_types.go b/apis/v1beta1/vsphereclusteridentity_types.go index ae23092fff..3f3f27c9b3 100644 --- a/apis/v1beta1/vsphereclusteridentity_types.go +++ b/apis/v1beta1/vsphereclusteridentity_types.go @@ -18,7 +18,7 @@ package v1beta1 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) const ( @@ -31,10 +31,10 @@ const ( // VSphereClusterIdentity's Available condition and corresponding reasons that will be used in v1Beta2 API version. const ( // VSphereClusterIdentityAvailableV1Beta2Condition documents the availability for a VSphereClusterIdentity. - VSphereClusterIdentityAvailableV1Beta2Condition = clusterv1.AvailableV1Beta2Condition + VSphereClusterIdentityAvailableV1Beta2Condition = clusterv1beta1.AvailableV1Beta2Condition // VSphereClusterIdentityAvailableV1Beta2Reason surfaces when the VSphereClusterIdentity is available. - VSphereClusterIdentityAvailableV1Beta2Reason = clusterv1.AvailableV1Beta2Reason + VSphereClusterIdentityAvailableV1Beta2Reason = clusterv1beta1.AvailableV1Beta2Reason // VSphereClusterIdentitySecretNotAvailableV1Beta2Reason surfaces when the VSphereClusterIdentity secret is not available. VSphereClusterIdentitySecretNotAvailableV1Beta2Reason = "SecretNotAvailable" @@ -46,7 +46,7 @@ const ( VSphereClusterIdentitySettingSecretOwnerReferenceFailedV1Beta2Reason = "SettingSecretOwnerReferenceFailed" // VSphereClusterIdentityDeletingV1Beta2Reason surfaces when the VSphereClusterIdentity is being deleted. 
- VSphereClusterIdentityDeletingV1Beta2Reason = clusterv1.DeletingV1Beta2Reason + VSphereClusterIdentityDeletingV1Beta2Reason = clusterv1beta1.DeletingV1Beta2Reason ) // VSphereClusterIdentitySpec contains a secret reference and a group of allowed namespaces. @@ -69,7 +69,7 @@ type VSphereClusterIdentityStatus struct { // Conditions defines current service state of the VSphereCluster. // +optional - Conditions clusterv1.Conditions `json:"conditions,omitempty"` + Conditions clusterv1beta1.Conditions `json:"conditions,omitempty"` // v1beta2 groups all the fields that will be added or modified in VSphereClusterIdentity's status with the V1Beta2 version. // +optional @@ -117,12 +117,12 @@ type VSphereIdentityReference struct { } // GetConditions returns the conditions for the VSphereClusterIdentity. -func (c *VSphereClusterIdentity) GetConditions() clusterv1.Conditions { +func (c *VSphereClusterIdentity) GetConditions() clusterv1beta1.Conditions { return c.Status.Conditions } // SetConditions sets the conditions on the VSphereClusterIdentity. -func (c *VSphereClusterIdentity) SetConditions(conditions clusterv1.Conditions) { +func (c *VSphereClusterIdentity) SetConditions(conditions clusterv1beta1.Conditions) { c.Status.Conditions = conditions } diff --git a/apis/v1beta1/vspheredeploymentzone_types.go b/apis/v1beta1/vspheredeploymentzone_types.go index 6b288361b5..64c93102c8 100644 --- a/apis/v1beta1/vspheredeploymentzone_types.go +++ b/apis/v1beta1/vspheredeploymentzone_types.go @@ -18,7 +18,7 @@ package v1beta1 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) const ( @@ -32,17 +32,17 @@ const ( const ( // VSphereDeploymentZoneReadyV1Beta2Condition is true if the VSphereDeploymentZone's deletionTimestamp is not set, VSphereDeploymentZone's // VCenterAvailable, PlacementConstraintReady and FailureDomainValidated conditions are true. - VSphereDeploymentZoneReadyV1Beta2Condition = clusterv1.ReadyV1Beta2Condition + VSphereDeploymentZoneReadyV1Beta2Condition = clusterv1beta1.ReadyV1Beta2Condition // VSphereDeploymentZoneReadyV1Beta2Reason surfaces when the VSphereDeploymentZone readiness criteria is met. - VSphereDeploymentZoneReadyV1Beta2Reason = clusterv1.ReadyV1Beta2Reason + VSphereDeploymentZoneReadyV1Beta2Reason = clusterv1beta1.ReadyV1Beta2Reason // VSphereDeploymentZoneNotReadyV1Beta2Reason surfaces when the VSphereDeploymentZone readiness criteria is not met. - VSphereDeploymentZoneNotReadyV1Beta2Reason = clusterv1.NotReadyV1Beta2Reason + VSphereDeploymentZoneNotReadyV1Beta2Reason = clusterv1beta1.NotReadyV1Beta2Reason // VSphereDeploymentZoneReadyUnknownV1Beta2Reason surfaces when at least one VSphereDeploymentZone readiness criteria is unknown // and no VSphereDeploymentZone readiness criteria is not met. - VSphereDeploymentZoneReadyUnknownV1Beta2Reason = clusterv1.ReadyUnknownV1Beta2Reason + VSphereDeploymentZoneReadyUnknownV1Beta2Reason = clusterv1beta1.ReadyUnknownV1Beta2Reason ) // VSphereDeploymentZone's VCenterAvailable condition and corresponding reasons that will be used in v1Beta2 API version. @@ -51,13 +51,13 @@ const ( VSphereDeploymentZoneVCenterAvailableV1Beta2Condition = "VCenterAvailable" // VSphereDeploymentZoneVCenterAvailableV1Beta2Reason surfaces when the vCenter for a VSphereDeploymentZone is available. 
- VSphereDeploymentZoneVCenterAvailableV1Beta2Reason = clusterv1.AvailableV1Beta2Reason + VSphereDeploymentZoneVCenterAvailableV1Beta2Reason = clusterv1beta1.AvailableV1Beta2Reason // VSphereDeploymentZoneVCenterUnreachableV1Beta2Reason surfaces when the vCenter for a VSphereDeploymentZone is unreachable. VSphereDeploymentZoneVCenterUnreachableV1Beta2Reason = "VCenterUnreachable" // VSphereDeploymentZoneVCenterAvailableDeletingV1Beta2Reason surfaces when the vCenter for a VSphereDeploymentZone is being deleted. - VSphereDeploymentZoneVCenterAvailableDeletingV1Beta2Reason = clusterv1.DeletingV1Beta2Reason + VSphereDeploymentZoneVCenterAvailableDeletingV1Beta2Reason = clusterv1beta1.DeletingV1Beta2Reason ) // VSphereDeploymentZone's PlacementConstraintReady condition and corresponding reasons that will be used in v1Beta2 API version. @@ -66,7 +66,7 @@ const ( VSphereDeploymentZonePlacementConstraintReadyV1Beta2Condition = "PlacementConstraintReady" // VSphereDeploymentZonePlacementConstraintReadyV1Beta2Reason surfaces when the placement status for a VSphereDeploymentZone is ready. - VSphereDeploymentZonePlacementConstraintReadyV1Beta2Reason = clusterv1.ReadyV1Beta2Reason + VSphereDeploymentZonePlacementConstraintReadyV1Beta2Reason = clusterv1beta1.ReadyV1Beta2Reason // VSphereDeploymentZonePlacementConstraintResourcePoolNotFoundV1Beta2Reason surfaces when the resource pool for a VSphereDeploymentZone is not found. VSphereDeploymentZonePlacementConstraintResourcePoolNotFoundV1Beta2Reason = "ResourcePoolNotFound" @@ -75,7 +75,7 @@ const ( VSphereDeploymentZonePlacementConstraintFolderNotFoundV1Beta2Reason = "FolderNotFound" // VSphereDeploymentZonePlacementConstraintDeletingV1Beta2Reason surfaces when the VSphereDeploymentZone is being deleted. - VSphereDeploymentZonePlacementConstraintDeletingV1Beta2Reason = clusterv1.DeletingV1Beta2Reason + VSphereDeploymentZonePlacementConstraintDeletingV1Beta2Reason = clusterv1beta1.DeletingV1Beta2Reason ) // VSphereDeploymentZone's FailureDomainValidated condition and corresponding reasons that will be used in v1Beta2 API version. @@ -111,7 +111,7 @@ const ( VSphereDeploymentZoneFailureDomainResourcePoolNotFoundV1Beta2Reason = "ResourcePoolNotFound" // VSphereDeploymentZoneFailureDomainDeletingV1Beta2Reason surfaces when the VSphereDeploymentZone is being deleted. - VSphereDeploymentZoneFailureDomainDeletingV1Beta2Reason = clusterv1.DeletingV1Beta2Reason + VSphereDeploymentZoneFailureDomainDeletingV1Beta2Reason = clusterv1beta1.DeletingV1Beta2Reason ) // VSphereDeploymentZoneSpec defines the desired state of VSphereDeploymentZone. @@ -168,7 +168,7 @@ type VSphereDeploymentZoneStatus struct { // Conditions defines current service state of the VSphereMachine. // +optional - Conditions clusterv1.Conditions `json:"conditions,omitempty"` + Conditions clusterv1beta1.Conditions `json:"conditions,omitempty"` // v1beta2 groups all the fields that will be added or modified in VSphereDeploymentZone's status with the V1Beta2 version. // +optional @@ -202,12 +202,12 @@ type VSphereDeploymentZone struct { } // GetConditions returns the conditions for the VSphereDeploymentZone. -func (z *VSphereDeploymentZone) GetConditions() clusterv1.Conditions { +func (z *VSphereDeploymentZone) GetConditions() clusterv1beta1.Conditions { return z.Status.Conditions } // SetConditions sets the conditions on the VSphereDeploymentZone. 
-func (z *VSphereDeploymentZone) SetConditions(conditions clusterv1.Conditions) { +func (z *VSphereDeploymentZone) SetConditions(conditions clusterv1beta1.Conditions) { z.Status.Conditions = conditions } diff --git a/apis/v1beta1/vspheremachine_types.go b/apis/v1beta1/vspheremachine_types.go index 945b463fe0..cc6d31d1aa 100644 --- a/apis/v1beta1/vspheremachine_types.go +++ b/apis/v1beta1/vspheremachine_types.go @@ -18,7 +18,7 @@ package v1beta1 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" "sigs.k8s.io/cluster-api/errors" ) @@ -33,17 +33,17 @@ const ( const ( // VSphereMachineReadyV1Beta2Condition is true if the VSphereMachine's deletionTimestamp is not set, VSphereMachine's // VirtualMachineProvisioned is true. - VSphereMachineReadyV1Beta2Condition = clusterv1.ReadyV1Beta2Condition + VSphereMachineReadyV1Beta2Condition = clusterv1beta1.ReadyV1Beta2Condition // VSphereMachineReadyV1Beta2Reason surfaces when the VSphereMachine readiness criteria is met. - VSphereMachineReadyV1Beta2Reason = clusterv1.ReadyV1Beta2Reason + VSphereMachineReadyV1Beta2Reason = clusterv1beta1.ReadyV1Beta2Reason // VSphereMachineNotReadyV1Beta2Reason surfaces when the VSphereMachine readiness criteria is not met. - VSphereMachineNotReadyV1Beta2Reason = clusterv1.NotReadyV1Beta2Reason + VSphereMachineNotReadyV1Beta2Reason = clusterv1beta1.NotReadyV1Beta2Reason // VSphereMachineReadyUnknownV1Beta2Reason surfaces when at least one VSphereMachine readiness criteria is unknown // and no VSphereMachine readiness criteria is not met. - VSphereMachineReadyUnknownV1Beta2Reason = clusterv1.ReadyUnknownV1Beta2Reason + VSphereMachineReadyUnknownV1Beta2Reason = clusterv1beta1.ReadyUnknownV1Beta2Reason ) // VSphereMachine's VirtualMachineProvisioned condition and corresponding reasons that will be used in v1Beta2 API version. @@ -61,15 +61,15 @@ const ( // VSphereMachineVirtualMachineWaitingForClusterInfrastructureReadyV1Beta2Reason documents the VirtualMachine that is controlled // by the VSphereMachine waiting for the cluster infrastructure to be ready. // Note: This reason is used only in govmomi mode. - VSphereMachineVirtualMachineWaitingForClusterInfrastructureReadyV1Beta2Reason = clusterv1.WaitingForClusterInfrastructureReadyV1Beta2Reason + VSphereMachineVirtualMachineWaitingForClusterInfrastructureReadyV1Beta2Reason = clusterv1beta1.WaitingForClusterInfrastructureReadyV1Beta2Reason // VSphereMachineVirtualMachineWaitingForControlPlaneInitializedV1Beta2Reason documents the VirtualMachine that is controlled // by the VSphereMachine waiting for the control plane to be initialized. - VSphereMachineVirtualMachineWaitingForControlPlaneInitializedV1Beta2Reason = clusterv1.WaitingForControlPlaneInitializedV1Beta2Reason + VSphereMachineVirtualMachineWaitingForControlPlaneInitializedV1Beta2Reason = clusterv1beta1.WaitingForControlPlaneInitializedV1Beta2Reason // VSphereMachineVirtualMachineWaitingForBootstrapDataV1Beta2Reason documents the VirtualMachine that is controlled // by the VSphereMachine waiting for the bootstrap data to be ready. 
- VSphereMachineVirtualMachineWaitingForBootstrapDataV1Beta2Reason = clusterv1.WaitingForBootstrapDataV1Beta2Reason + VSphereMachineVirtualMachineWaitingForBootstrapDataV1Beta2Reason = clusterv1beta1.WaitingForBootstrapDataV1Beta2Reason // VSphereMachineVirtualMachineProvisioningV1Beta2Reason surfaces when the VirtualMachine that is controlled // by the VSphereMachine is provisioning. @@ -92,15 +92,15 @@ const ( // VSphereMachineVirtualMachineProvisionedV1Beta2Reason surfaces when the VirtualMachine that is controlled // by the VSphereMachine is provisioned. - VSphereMachineVirtualMachineProvisionedV1Beta2Reason = clusterv1.ProvisionedV1Beta2Reason + VSphereMachineVirtualMachineProvisionedV1Beta2Reason = clusterv1beta1.ProvisionedV1Beta2Reason // VSphereMachineVirtualMachineNotProvisionedV1Beta2Reason surfaces when the VirtualMachine that is controlled // by the VSphereMachine is not provisioned. - VSphereMachineVirtualMachineNotProvisionedV1Beta2Reason = clusterv1.NotProvisionedV1Beta2Reason + VSphereMachineVirtualMachineNotProvisionedV1Beta2Reason = clusterv1beta1.NotProvisionedV1Beta2Reason // VSphereMachineVirtualMachineDeletingV1Beta2Reason surfaces when the VirtualMachine that is controlled // by the VSphereMachine is being deleted. - VSphereMachineVirtualMachineDeletingV1Beta2Reason = clusterv1.DeletingV1Beta2Reason + VSphereMachineVirtualMachineDeletingV1Beta2Reason = clusterv1beta1.DeletingV1Beta2Reason ) // VSphereMachineSpec defines the desired state of VSphereMachine. @@ -175,7 +175,7 @@ type VSphereMachineStatus struct { Ready bool `json:"ready"` // Addresses contains the VSphere instance associated addresses. - Addresses []clusterv1.MachineAddress `json:"addresses,omitempty"` + Addresses []clusterv1beta1.MachineAddress `json:"addresses,omitempty"` // Network returns the network status for each of the machine's configured // network interfaces. @@ -222,7 +222,7 @@ type VSphereMachineStatus struct { // Conditions defines current service state of the VSphereMachine. // +optional - Conditions clusterv1.Conditions `json:"conditions,omitempty"` + Conditions clusterv1beta1.Conditions `json:"conditions,omitempty"` // v1beta2 groups all the fields that will be added or modified in VSphereMachine's status with the V1Beta2 version. // +optional @@ -261,12 +261,12 @@ type VSphereMachine struct { } // GetConditions returns the conditions for a VSphereMachine. -func (m *VSphereMachine) GetConditions() clusterv1.Conditions { +func (m *VSphereMachine) GetConditions() clusterv1beta1.Conditions { return m.Status.Conditions } // SetConditions sets the conditions on a VSphereMachine. -func (m *VSphereMachine) SetConditions(conditions clusterv1.Conditions) { +func (m *VSphereMachine) SetConditions(conditions clusterv1beta1.Conditions) { m.Status.Conditions = conditions } diff --git a/apis/v1beta1/vspherevm_types.go b/apis/v1beta1/vspherevm_types.go index 15c2b60fdd..bf59f643a9 100644 --- a/apis/v1beta1/vspherevm_types.go +++ b/apis/v1beta1/vspherevm_types.go @@ -21,7 +21,7 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" "sigs.k8s.io/cluster-api/errors" ) @@ -44,17 +44,17 @@ const ( const ( // VSphereVMReadyV1Beta2Condition is true if the VSphereVM's deletionTimestamp is not set, VSphereVM's // VirtualMachineProvisioned, VCenterAvailable and IPAddressClaimsFulfilled are true. 
- VSphereVMReadyV1Beta2Condition = clusterv1.ReadyV1Beta2Condition + VSphereVMReadyV1Beta2Condition = clusterv1beta1.ReadyV1Beta2Condition // VSphereVMReadyV1Beta2Reason surfaces when the VSphereVM readiness criteria is met. - VSphereVMReadyV1Beta2Reason = clusterv1.ReadyV1Beta2Reason + VSphereVMReadyV1Beta2Reason = clusterv1beta1.ReadyV1Beta2Reason // VSphereVMNotReadyV1Beta2Reason surfaces when the VSphereVM readiness criteria is not met. - VSphereVMNotReadyV1Beta2Reason = clusterv1.NotReadyV1Beta2Reason + VSphereVMNotReadyV1Beta2Reason = clusterv1beta1.NotReadyV1Beta2Reason // VSphereVMReadyUnknownV1Beta2Reason surfaces when at least one VSphereVM readiness criteria is unknown // and no VSphereVM readiness criteria is not met. - VSphereVMReadyUnknownV1Beta2Reason = clusterv1.ReadyUnknownV1Beta2Reason + VSphereVMReadyUnknownV1Beta2Reason = clusterv1beta1.ReadyUnknownV1Beta2Reason ) // VSphereVM's VirtualMachineProvisioned condition and corresponding reasons that will be used in v1Beta2 API version. @@ -87,7 +87,7 @@ const ( // VSphereVMVirtualMachineProvisionedV1Beta2Reason surfaces when the VirtualMachine that is controlled // by the VSphereVM is provisioned. - VSphereVMVirtualMachineProvisionedV1Beta2Reason = clusterv1.ProvisionedV1Beta2Reason + VSphereVMVirtualMachineProvisionedV1Beta2Reason = clusterv1beta1.ProvisionedV1Beta2Reason // VSphereVMVirtualMachineTaskFailedV1Beta2Reason surfaces when a task for the VirtualMachine that is controlled // by the VSphereVM failed; the reconcile look will automatically retry the operation, @@ -102,11 +102,11 @@ const ( // VSphereVMVirtualMachineNotProvisionedV1Beta2Reason surfaces when the VirtualMachine that is controlled // by the VSphereVM is not provisioned. - VSphereVMVirtualMachineNotProvisionedV1Beta2Reason = clusterv1.NotProvisionedV1Beta2Reason + VSphereVMVirtualMachineNotProvisionedV1Beta2Reason = clusterv1beta1.NotProvisionedV1Beta2Reason // VSphereVMVirtualMachineDeletingV1Beta2Reason surfaces when the VirtualMachine that is controlled // by the VSphereVM is being deleted. - VSphereVMVirtualMachineDeletingV1Beta2Reason = clusterv1.DeletingV1Beta2Reason + VSphereVMVirtualMachineDeletingV1Beta2Reason = clusterv1beta1.DeletingV1Beta2Reason ) // VSphereVM's VCenterAvailable condition and corresponding reasons that will be used in v1Beta2 API version. @@ -116,7 +116,7 @@ const ( // VSphereVMVCenterAvailableV1Beta2Reason documents the VCenter hosting the VSphereVM // being available. - VSphereVMVCenterAvailableV1Beta2Reason = clusterv1.AvailableV1Beta2Reason + VSphereVMVCenterAvailableV1Beta2Reason = clusterv1beta1.AvailableV1Beta2Reason // VSphereVMVCenterUnreachableV1Beta2Reason documents the VCenter hosting the VSphereVM // cannot be reached. @@ -301,7 +301,7 @@ type VSphereVMStatus struct { // Conditions defines current service state of the VSphereVM. // +optional - Conditions clusterv1.Conditions `json:"conditions,omitempty"` + Conditions clusterv1beta1.Conditions `json:"conditions,omitempty"` // ModuleUUID is the unique identifier for the vCenter cluster module construct // which is used to configure anti-affinity. Objects with the same ModuleUUID @@ -349,12 +349,12 @@ type VSphereVM struct { } // GetConditions returns the conditions for a VSphereVM. -func (r *VSphereVM) GetConditions() clusterv1.Conditions { +func (r *VSphereVM) GetConditions() clusterv1beta1.Conditions { return r.Status.Conditions } // SetConditions sets the conditions on a VSphereVM. 
-func (r *VSphereVM) SetConditions(conditions clusterv1.Conditions) { +func (r *VSphereVM) SetConditions(conditions clusterv1beta1.Conditions) { r.Status.Conditions = conditions } diff --git a/apis/v1beta1/zz_generated.deepcopy.go b/apis/v1beta1/zz_generated.deepcopy.go index 1f11e3adbf..1c12980758 100644 --- a/apis/v1beta1/zz_generated.deepcopy.go +++ b/apis/v1beta1/zz_generated.deepcopy.go @@ -24,7 +24,7 @@ import ( "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" - apiv1beta1 "sigs.k8s.io/cluster-api/api/v1beta1" + corev1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" "sigs.k8s.io/cluster-api/errors" ) @@ -573,7 +573,7 @@ func (in *VSphereClusterIdentityStatus) DeepCopyInto(out *VSphereClusterIdentity *out = *in if in.Conditions != nil { in, out := &in.Conditions, &out.Conditions - *out = make(apiv1beta1.Conditions, len(*in)) + *out = make(corev1beta1.Conditions, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -685,14 +685,14 @@ func (in *VSphereClusterStatus) DeepCopyInto(out *VSphereClusterStatus) { *out = *in if in.Conditions != nil { in, out := &in.Conditions, &out.Conditions - *out = make(apiv1beta1.Conditions, len(*in)) + *out = make(corev1beta1.Conditions, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } if in.FailureDomains != nil { in, out := &in.FailureDomains, &out.FailureDomains - *out = make(apiv1beta1.FailureDomains, len(*in)) + *out = make(corev1beta1.FailureDomains, len(*in)) for key, val := range *in { (*out)[key] = *val.DeepCopy() } @@ -916,7 +916,7 @@ func (in *VSphereDeploymentZoneStatus) DeepCopyInto(out *VSphereDeploymentZoneSt } if in.Conditions != nil { in, out := &in.Conditions, &out.Conditions - *out = make(apiv1beta1.Conditions, len(*in)) + *out = make(corev1beta1.Conditions, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -1166,7 +1166,7 @@ func (in *VSphereMachineStatus) DeepCopyInto(out *VSphereMachineStatus) { *out = *in if in.Addresses != nil { in, out := &in.Addresses, &out.Addresses - *out = make([]apiv1beta1.MachineAddress, len(*in)) + *out = make([]corev1beta1.MachineAddress, len(*in)) copy(*out, *in) } if in.Network != nil { @@ -1188,7 +1188,7 @@ func (in *VSphereMachineStatus) DeepCopyInto(out *VSphereMachineStatus) { } if in.Conditions != nil { in, out := &in.Conditions, &out.Conditions - *out = make(apiv1beta1.Conditions, len(*in)) + *out = make(corev1beta1.Conditions, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -1456,7 +1456,7 @@ func (in *VSphereVMStatus) DeepCopyInto(out *VSphereVMStatus) { } if in.Conditions != nil { in, out := &in.Conditions, &out.Conditions - *out = make(apiv1beta1.Conditions, len(*in)) + *out = make(corev1beta1.Conditions, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } diff --git a/apis/vmware/v1beta1/conditions_consts.go b/apis/vmware/v1beta1/conditions_consts.go index 010e181677..871f2cbc6e 100644 --- a/apis/vmware/v1beta1/conditions_consts.go +++ b/apis/vmware/v1beta1/conditions_consts.go @@ -16,12 +16,12 @@ limitations under the License. package v1beta1 -import clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" +import clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" const ( // ResourcePolicyReadyCondition reports the successful creation of a // Resource Policy. 
- ResourcePolicyReadyCondition clusterv1.ConditionType = "ResourcePolicyReady" + ResourcePolicyReadyCondition clusterv1beta1.ConditionType = "ResourcePolicyReady" // ResourcePolicyCreationFailedReason used when any errors occur during // ResourcePolicy creation. @@ -31,7 +31,7 @@ const ( const ( // ClusterNetworkReadyCondition reports the successful provision of a // Cluster Network. - ClusterNetworkReadyCondition clusterv1.ConditionType = "ClusterNetworkReady" + ClusterNetworkReadyCondition clusterv1beta1.ConditionType = "ClusterNetworkReady" // ClusterNetworkProvisionStartedReason is used when waiting for Cluster // Network to be Ready. @@ -44,7 +44,7 @@ const ( const ( // LoadBalancerReadyCondition reports the successful reconciliation of // a static control plane endpoint. - LoadBalancerReadyCondition clusterv1.ConditionType = "LoadBalancerReady" + LoadBalancerReadyCondition clusterv1beta1.ConditionType = "LoadBalancerReady" // LoadBalancerCreationFailedReason is used when load balancer related // resources creation fails. @@ -73,7 +73,7 @@ const ( const ( // ProviderServiceAccountsReadyCondition documents the status of provider service accounts // and related Roles, RoleBindings and Secrets are created. - ProviderServiceAccountsReadyCondition clusterv1.ConditionType = "ProviderServiceAccountsReady" + ProviderServiceAccountsReadyCondition clusterv1beta1.ConditionType = "ProviderServiceAccountsReady" // ProviderServiceAccountsReconciliationFailedReason reports that provider service accounts related resources reconciliation failed. ProviderServiceAccountsReconciliationFailedReason = "ProviderServiceAccountsReconciliationFailed" @@ -94,7 +94,7 @@ const ( SupervisorHeadlessSvcPort = 6443 // ServiceDiscoveryReadyCondition documents the status of service discoveries. - ServiceDiscoveryReadyCondition clusterv1.ConditionType = "ServiceDiscoveryReady" + ServiceDiscoveryReadyCondition clusterv1beta1.ConditionType = "ServiceDiscoveryReady" // SupervisorHeadlessServiceSetupFailedReason documents the headless service setup for svc api server failed. SupervisorHeadlessServiceSetupFailedReason = "SupervisorHeadlessServiceSetupFailed" diff --git a/apis/vmware/v1beta1/vspherecluster_types.go b/apis/vmware/v1beta1/vspherecluster_types.go index e3eda76246..481b665107 100644 --- a/apis/vmware/v1beta1/vspherecluster_types.go +++ b/apis/vmware/v1beta1/vspherecluster_types.go @@ -18,7 +18,7 @@ package v1beta1 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) const ( @@ -38,17 +38,17 @@ const ( const ( // VSphereClusterReadyV1Beta2Condition is true if the VSphereCluster's deletionTimestamp is not set, VSphereCluster's // ResourcePolicyReady, NetworkReady, LoadBalancerReady, ProviderServiceAccountsReady and ServiceDiscoveryReady conditions are true. - VSphereClusterReadyV1Beta2Condition = clusterv1.ReadyV1Beta2Condition + VSphereClusterReadyV1Beta2Condition = clusterv1beta1.ReadyV1Beta2Condition // VSphereClusterReadyV1Beta2Reason surfaces when the VSphereCluster readiness criteria is met. - VSphereClusterReadyV1Beta2Reason = clusterv1.ReadyV1Beta2Reason + VSphereClusterReadyV1Beta2Reason = clusterv1beta1.ReadyV1Beta2Reason // VSphereClusterNotReadyV1Beta2Reason surfaces when the VSphereCluster readiness criteria is not met. 
- VSphereClusterNotReadyV1Beta2Reason = clusterv1.NotReadyV1Beta2Reason + VSphereClusterNotReadyV1Beta2Reason = clusterv1beta1.NotReadyV1Beta2Reason // VSphereClusterReadyUnknownV1Beta2Reason surfaces when at least one VSphereCluster readiness criteria is unknown // and no VSphereCluster readiness criteria is not met. - VSphereClusterReadyUnknownV1Beta2Reason = clusterv1.ReadyUnknownV1Beta2Reason + VSphereClusterReadyUnknownV1Beta2Reason = clusterv1beta1.ReadyUnknownV1Beta2Reason ) // VSphereCluster's ResourcePolicyReady condition and corresponding reasons that will be used in v1Beta2 API version. @@ -57,13 +57,13 @@ const ( VSphereClusterResourcePolicyReadyV1Beta2Condition = "ResourcePolicyReady" // VSphereClusterResourcePolicyReadyV1Beta2Reason surfaces when the ResourcePolicy for a VSphereCluster is ready. - VSphereClusterResourcePolicyReadyV1Beta2Reason = clusterv1.ReadyV1Beta2Reason + VSphereClusterResourcePolicyReadyV1Beta2Reason = clusterv1beta1.ReadyV1Beta2Reason // VSphereClusterResourcePolicyNotReadyV1Beta2Reason surfaces when the ResourcePolicy for a VSphereCluster is not ready. - VSphereClusterResourcePolicyNotReadyV1Beta2Reason = clusterv1.NotReadyV1Beta2Reason + VSphereClusterResourcePolicyNotReadyV1Beta2Reason = clusterv1beta1.NotReadyV1Beta2Reason // VSphereClusterResourcePolicyReadyDeletingV1Beta2Reason surfaces when the resource policy for a VSphereCluster is being deleted. - VSphereClusterResourcePolicyReadyDeletingV1Beta2Reason = clusterv1.DeletingV1Beta2Reason + VSphereClusterResourcePolicyReadyDeletingV1Beta2Reason = clusterv1beta1.DeletingV1Beta2Reason ) // VSphereCluster's NetworkReady condition and corresponding reasons that will be used in v1Beta2 API version. @@ -72,13 +72,13 @@ const ( VSphereClusterNetworkReadyV1Beta2Condition = "NetworkReady" // VSphereClusterNetworkReadyV1Beta2Reason surfaces when the network for a VSphereCluster is ready. - VSphereClusterNetworkReadyV1Beta2Reason = clusterv1.ReadyV1Beta2Reason + VSphereClusterNetworkReadyV1Beta2Reason = clusterv1beta1.ReadyV1Beta2Reason // VSphereClusterNetworkNotReadyV1Beta2Reason surfaces when the network for a VSphereCluster is not ready. - VSphereClusterNetworkNotReadyV1Beta2Reason = clusterv1.NotReadyV1Beta2Reason + VSphereClusterNetworkNotReadyV1Beta2Reason = clusterv1beta1.NotReadyV1Beta2Reason // VSphereClusterNetworkReadyDeletingV1Beta2Reason surfaces when the network for a VSphereCluster is being deleted. - VSphereClusterNetworkReadyDeletingV1Beta2Reason = clusterv1.DeletingV1Beta2Reason + VSphereClusterNetworkReadyDeletingV1Beta2Reason = clusterv1beta1.DeletingV1Beta2Reason ) // VSphereCluster's LoadBalancerReady condition and corresponding reasons that will be used in v1Beta2 API version. @@ -87,16 +87,16 @@ const ( VSphereClusterLoadBalancerReadyV1Beta2Condition = "LoadBalancerReady" // VSphereClusterLoadBalancerReadyV1Beta2Reason surfaces when the LoadBalancer for a VSphereCluster is ready. - VSphereClusterLoadBalancerReadyV1Beta2Reason = clusterv1.ReadyV1Beta2Reason + VSphereClusterLoadBalancerReadyV1Beta2Reason = clusterv1beta1.ReadyV1Beta2Reason // VSphereClusterLoadBalancerNotReadyV1Beta2Reason surfaces when the LoadBalancer for a VSphereCluster is not ready. - VSphereClusterLoadBalancerNotReadyV1Beta2Reason = clusterv1.NotReadyV1Beta2Reason + VSphereClusterLoadBalancerNotReadyV1Beta2Reason = clusterv1beta1.NotReadyV1Beta2Reason // VSphereClusterLoadBalancerWaitingForIPV1Beta2Reason surfaces when the LoadBalancer for a VSphereCluster is waiting for an IP to be assigned. 
VSphereClusterLoadBalancerWaitingForIPV1Beta2Reason = "WaitingForIP" // VSphereClusterLoadBalancerDeletingV1Beta2Reason surfaces when the LoadBalancer for a VSphereCluster is being deleted. - VSphereClusterLoadBalancerDeletingV1Beta2Reason = clusterv1.DeletingV1Beta2Reason + VSphereClusterLoadBalancerDeletingV1Beta2Reason = clusterv1beta1.DeletingV1Beta2Reason ) // VSphereCluster's ProviderServiceAccountsReady condition and corresponding reasons that will be used in v1Beta2 API version. @@ -105,10 +105,10 @@ const ( VSphereClusterProviderServiceAccountsReadyV1Beta2Condition = "ProviderServiceAccountsReady" // VSphereClusterProviderServiceAccountsReadyV1Beta2Reason surfaces when the provider service accounts for a VSphereCluster is ready. - VSphereClusterProviderServiceAccountsReadyV1Beta2Reason = clusterv1.ReadyV1Beta2Reason + VSphereClusterProviderServiceAccountsReadyV1Beta2Reason = clusterv1beta1.ReadyV1Beta2Reason // VSphereClusterProviderServiceAccountsNotReadyV1Beta2Reason surfaces when the provider service accounts for a VSphereCluster is not ready. - VSphereClusterProviderServiceAccountsNotReadyV1Beta2Reason = clusterv1.NotReadyV1Beta2Reason + VSphereClusterProviderServiceAccountsNotReadyV1Beta2Reason = clusterv1beta1.NotReadyV1Beta2Reason ) // VSphereCluster's ServiceDiscoveryReady condition and corresponding reasons that will be used in v1Beta2 API version. @@ -117,16 +117,16 @@ const ( VSphereClusterServiceDiscoveryReadyV1Beta2Condition = "ServiceDiscoveryReady" // VSphereClusterServiceDiscoveryReadyV1Beta2Reason surfaces when the service discovery for a VSphereCluster is ready. - VSphereClusterServiceDiscoveryReadyV1Beta2Reason = clusterv1.ReadyV1Beta2Reason + VSphereClusterServiceDiscoveryReadyV1Beta2Reason = clusterv1beta1.ReadyV1Beta2Reason // VSphereClusterServiceDiscoveryNotReadyV1Beta2Reason surfaces when the service discovery for a VSphereCluster is not ready. - VSphereClusterServiceDiscoveryNotReadyV1Beta2Reason = clusterv1.NotReadyV1Beta2Reason + VSphereClusterServiceDiscoveryNotReadyV1Beta2Reason = clusterv1beta1.NotReadyV1Beta2Reason ) // VSphereClusterSpec defines the desired state of VSphereCluster. type VSphereClusterSpec struct { // +optional - ControlPlaneEndpoint clusterv1.APIEndpoint `json:"controlPlaneEndpoint"` + ControlPlaneEndpoint clusterv1beta1.APIEndpoint `json:"controlPlaneEndpoint"` } // VSphereClusterStatus defines the observed state of VSphereClusterSpec. @@ -143,11 +143,11 @@ type VSphereClusterStatus struct { // Conditions defines current service state of the VSphereCluster. // +optional - Conditions clusterv1.Conditions `json:"conditions,omitempty"` + Conditions clusterv1beta1.Conditions `json:"conditions,omitempty"` // FailureDomains is a list of failure domain objects synced from the // infrastructure provider. - FailureDomains clusterv1.FailureDomains `json:"failureDomains,omitempty"` + FailureDomains clusterv1beta1.FailureDomains `json:"failureDomains,omitempty"` // v1beta2 groups all the fields that will be added or modified in VSphereCluster's status with the V1Beta2 version. // +optional @@ -191,12 +191,12 @@ type VSphereClusterList struct { } // GetConditions returns conditions for VSphereCluster. -func (r *VSphereCluster) GetConditions() clusterv1.Conditions { +func (r *VSphereCluster) GetConditions() clusterv1beta1.Conditions { return r.Status.Conditions } // SetConditions sets conditions on the VSphereCluster. 
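For orientation, the pattern repeated across these API packages is a pure import rename: the core CAPI v1beta1 types now come from api/core/v1beta1 under the clusterv1beta1 alias, while the condition fields and accessors keep their shape. A minimal sketch of that convention, using a hypothetical Example type rather than any real CAPV kind:

package v1beta1

import (
	clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
)

// ExampleStatus mirrors the status structs in this patch: the legacy condition
// list keeps the clusterv1beta1.Conditions type, only the import alias changes.
type ExampleStatus struct {
	Conditions clusterv1beta1.Conditions `json:"conditions,omitempty"`
}

// Example is a hypothetical object; GetConditions/SetConditions keep the same
// shapes the CAPI condition utilities expect.
type Example struct {
	Status ExampleStatus
}

// GetConditions returns the legacy v1beta1 conditions.
func (e *Example) GetConditions() clusterv1beta1.Conditions {
	return e.Status.Conditions
}

// SetConditions sets the legacy v1beta1 conditions.
func (e *Example) SetConditions(conditions clusterv1beta1.Conditions) {
	e.Status.Conditions = conditions
}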
-func (r *VSphereCluster) SetConditions(conditions clusterv1.Conditions) { +func (r *VSphereCluster) SetConditions(conditions clusterv1beta1.Conditions) { r.Status.Conditions = conditions } diff --git a/apis/vmware/v1beta1/vspheremachine_types.go b/apis/vmware/v1beta1/vspheremachine_types.go index 4b0a91319f..d16372ca67 100644 --- a/apis/vmware/v1beta1/vspheremachine_types.go +++ b/apis/vmware/v1beta1/vspheremachine_types.go @@ -19,7 +19,7 @@ package v1beta1 import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" "sigs.k8s.io/cluster-api/errors" ) @@ -175,7 +175,7 @@ type VSphereMachineStatus struct { // Conditions defines current service state of the VSphereMachine. // +optional - Conditions clusterv1.Conditions `json:"conditions,omitempty"` + Conditions clusterv1beta1.Conditions `json:"conditions,omitempty"` // v1beta2 groups all the fields that will be added or modified in VSphereMachine's status with the V1Beta2 version. // +optional @@ -221,12 +221,12 @@ type VSphereMachineList struct { } // GetConditions returns the conditions for the VSphereMachine. -func (r *VSphereMachine) GetConditions() clusterv1.Conditions { +func (r *VSphereMachine) GetConditions() clusterv1beta1.Conditions { return r.Status.Conditions } // SetConditions sets conditions on the VSphereMachine. -func (r *VSphereMachine) SetConditions(conditions clusterv1.Conditions) { +func (r *VSphereMachine) SetConditions(conditions clusterv1beta1.Conditions) { r.Status.Conditions = conditions } diff --git a/apis/vmware/v1beta1/zz_generated.deepcopy.go b/apis/vmware/v1beta1/zz_generated.deepcopy.go index 7358ccaa1c..040ce03677 100644 --- a/apis/vmware/v1beta1/zz_generated.deepcopy.go +++ b/apis/vmware/v1beta1/zz_generated.deepcopy.go @@ -25,7 +25,7 @@ import ( rbacv1 "k8s.io/api/rbac/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" - apiv1beta1 "sigs.k8s.io/cluster-api/api/v1beta1" + corev1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" "sigs.k8s.io/cluster-api/errors" ) @@ -209,14 +209,14 @@ func (in *VSphereClusterStatus) DeepCopyInto(out *VSphereClusterStatus) { *out = *in if in.Conditions != nil { in, out := &in.Conditions, &out.Conditions - *out = make(apiv1beta1.Conditions, len(*in)) + *out = make(corev1beta1.Conditions, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } if in.FailureDomains != nil { in, out := &in.FailureDomains, &out.FailureDomains - *out = make(apiv1beta1.FailureDomains, len(*in)) + *out = make(corev1beta1.FailureDomains, len(*in)) for key, val := range *in { (*out)[key] = *val.DeepCopy() } @@ -471,7 +471,7 @@ func (in *VSphereMachineStatus) DeepCopyInto(out *VSphereMachineStatus) { } if in.Conditions != nil { in, out := &in.Conditions, &out.Conditions - *out = make(apiv1beta1.Conditions, len(*in)) + *out = make(corev1beta1.Conditions, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } diff --git a/config/default/crd/bases/infrastructure.cluster.x-k8s.io_vsphereclusteridentities.yaml b/config/default/crd/bases/infrastructure.cluster.x-k8s.io_vsphereclusteridentities.yaml index af32776847..ab9bb860cb 100644 --- a/config/default/crd/bases/infrastructure.cluster.x-k8s.io_vsphereclusteridentities.yaml +++ b/config/default/crd/bases/infrastructure.cluster.x-k8s.io_vsphereclusteridentities.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - 
controller-gen.kubebuilder.io/version: v0.17.0 + controller-gen.kubebuilder.io/version: v0.18.0 name: vsphereclusteridentities.infrastructure.cluster.x-k8s.io spec: group: infrastructure.cluster.x-k8s.io diff --git a/config/default/crd/bases/infrastructure.cluster.x-k8s.io_vsphereclusters.yaml b/config/default/crd/bases/infrastructure.cluster.x-k8s.io_vsphereclusters.yaml index 97df12fdec..56e0d53d43 100644 --- a/config/default/crd/bases/infrastructure.cluster.x-k8s.io_vsphereclusters.yaml +++ b/config/default/crd/bases/infrastructure.cluster.x-k8s.io_vsphereclusters.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.17.0 + controller-gen.kubebuilder.io/version: v0.18.0 name: vsphereclusters.infrastructure.cluster.x-k8s.io spec: group: infrastructure.cluster.x-k8s.io diff --git a/config/default/crd/bases/infrastructure.cluster.x-k8s.io_vsphereclustertemplates.yaml b/config/default/crd/bases/infrastructure.cluster.x-k8s.io_vsphereclustertemplates.yaml index 43b2d8d5e1..30d21c4665 100644 --- a/config/default/crd/bases/infrastructure.cluster.x-k8s.io_vsphereclustertemplates.yaml +++ b/config/default/crd/bases/infrastructure.cluster.x-k8s.io_vsphereclustertemplates.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.17.0 + controller-gen.kubebuilder.io/version: v0.18.0 name: vsphereclustertemplates.infrastructure.cluster.x-k8s.io spec: group: infrastructure.cluster.x-k8s.io diff --git a/config/default/crd/bases/infrastructure.cluster.x-k8s.io_vspheredeploymentzones.yaml b/config/default/crd/bases/infrastructure.cluster.x-k8s.io_vspheredeploymentzones.yaml index a2860cbc3f..4d6c7c43c7 100644 --- a/config/default/crd/bases/infrastructure.cluster.x-k8s.io_vspheredeploymentzones.yaml +++ b/config/default/crd/bases/infrastructure.cluster.x-k8s.io_vspheredeploymentzones.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.17.0 + controller-gen.kubebuilder.io/version: v0.18.0 name: vspheredeploymentzones.infrastructure.cluster.x-k8s.io spec: group: infrastructure.cluster.x-k8s.io diff --git a/config/default/crd/bases/infrastructure.cluster.x-k8s.io_vspherefailuredomains.yaml b/config/default/crd/bases/infrastructure.cluster.x-k8s.io_vspherefailuredomains.yaml index 57e334f577..d9ad282340 100644 --- a/config/default/crd/bases/infrastructure.cluster.x-k8s.io_vspherefailuredomains.yaml +++ b/config/default/crd/bases/infrastructure.cluster.x-k8s.io_vspherefailuredomains.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.17.0 + controller-gen.kubebuilder.io/version: v0.18.0 name: vspherefailuredomains.infrastructure.cluster.x-k8s.io spec: group: infrastructure.cluster.x-k8s.io diff --git a/config/default/crd/bases/infrastructure.cluster.x-k8s.io_vspheremachines.yaml b/config/default/crd/bases/infrastructure.cluster.x-k8s.io_vspheremachines.yaml index 300190d0d3..c19e7b6db9 100644 --- a/config/default/crd/bases/infrastructure.cluster.x-k8s.io_vspheremachines.yaml +++ b/config/default/crd/bases/infrastructure.cluster.x-k8s.io_vspheremachines.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.17.0 + 
controller-gen.kubebuilder.io/version: v0.18.0 name: vspheremachines.infrastructure.cluster.x-k8s.io spec: group: infrastructure.cluster.x-k8s.io diff --git a/config/default/crd/bases/infrastructure.cluster.x-k8s.io_vspheremachinetemplates.yaml b/config/default/crd/bases/infrastructure.cluster.x-k8s.io_vspheremachinetemplates.yaml index f7fa965da5..e2f7eb96ff 100644 --- a/config/default/crd/bases/infrastructure.cluster.x-k8s.io_vspheremachinetemplates.yaml +++ b/config/default/crd/bases/infrastructure.cluster.x-k8s.io_vspheremachinetemplates.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.17.0 + controller-gen.kubebuilder.io/version: v0.18.0 name: vspheremachinetemplates.infrastructure.cluster.x-k8s.io spec: group: infrastructure.cluster.x-k8s.io diff --git a/config/default/crd/bases/infrastructure.cluster.x-k8s.io_vspherevms.yaml b/config/default/crd/bases/infrastructure.cluster.x-k8s.io_vspherevms.yaml index 5f517e496f..e91fe3fe5b 100644 --- a/config/default/crd/bases/infrastructure.cluster.x-k8s.io_vspherevms.yaml +++ b/config/default/crd/bases/infrastructure.cluster.x-k8s.io_vspherevms.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.17.0 + controller-gen.kubebuilder.io/version: v0.18.0 name: vspherevms.infrastructure.cluster.x-k8s.io spec: group: infrastructure.cluster.x-k8s.io diff --git a/config/supervisor/crd/bases/vmware.infrastructure.cluster.x-k8s.io_providerserviceaccounts.yaml b/config/supervisor/crd/bases/vmware.infrastructure.cluster.x-k8s.io_providerserviceaccounts.yaml index c72b24770a..370bb81c99 100644 --- a/config/supervisor/crd/bases/vmware.infrastructure.cluster.x-k8s.io_providerserviceaccounts.yaml +++ b/config/supervisor/crd/bases/vmware.infrastructure.cluster.x-k8s.io_providerserviceaccounts.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.17.0 + controller-gen.kubebuilder.io/version: v0.18.0 name: providerserviceaccounts.vmware.infrastructure.cluster.x-k8s.io spec: group: vmware.infrastructure.cluster.x-k8s.io diff --git a/config/supervisor/crd/bases/vmware.infrastructure.cluster.x-k8s.io_vsphereclusters.yaml b/config/supervisor/crd/bases/vmware.infrastructure.cluster.x-k8s.io_vsphereclusters.yaml index cce3c089a5..99876c3ba1 100644 --- a/config/supervisor/crd/bases/vmware.infrastructure.cluster.x-k8s.io_vsphereclusters.yaml +++ b/config/supervisor/crd/bases/vmware.infrastructure.cluster.x-k8s.io_vsphereclusters.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.17.0 + controller-gen.kubebuilder.io/version: v0.18.0 name: vsphereclusters.vmware.infrastructure.cluster.x-k8s.io spec: group: vmware.infrastructure.cluster.x-k8s.io diff --git a/config/supervisor/crd/bases/vmware.infrastructure.cluster.x-k8s.io_vsphereclustertemplates.yaml b/config/supervisor/crd/bases/vmware.infrastructure.cluster.x-k8s.io_vsphereclustertemplates.yaml index 7d993579f7..2192b01835 100644 --- a/config/supervisor/crd/bases/vmware.infrastructure.cluster.x-k8s.io_vsphereclustertemplates.yaml +++ b/config/supervisor/crd/bases/vmware.infrastructure.cluster.x-k8s.io_vsphereclustertemplates.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition 
metadata: annotations: - controller-gen.kubebuilder.io/version: v0.17.0 + controller-gen.kubebuilder.io/version: v0.18.0 name: vsphereclustertemplates.vmware.infrastructure.cluster.x-k8s.io spec: group: vmware.infrastructure.cluster.x-k8s.io diff --git a/config/supervisor/crd/bases/vmware.infrastructure.cluster.x-k8s.io_vspheremachines.yaml b/config/supervisor/crd/bases/vmware.infrastructure.cluster.x-k8s.io_vspheremachines.yaml index 498429aad5..8a846743f2 100644 --- a/config/supervisor/crd/bases/vmware.infrastructure.cluster.x-k8s.io_vspheremachines.yaml +++ b/config/supervisor/crd/bases/vmware.infrastructure.cluster.x-k8s.io_vspheremachines.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.17.0 + controller-gen.kubebuilder.io/version: v0.18.0 name: vspheremachines.vmware.infrastructure.cluster.x-k8s.io spec: group: vmware.infrastructure.cluster.x-k8s.io diff --git a/config/supervisor/crd/bases/vmware.infrastructure.cluster.x-k8s.io_vspheremachinetemplates.yaml b/config/supervisor/crd/bases/vmware.infrastructure.cluster.x-k8s.io_vspheremachinetemplates.yaml index dfc85b2958..40b98422be 100644 --- a/config/supervisor/crd/bases/vmware.infrastructure.cluster.x-k8s.io_vspheremachinetemplates.yaml +++ b/config/supervisor/crd/bases/vmware.infrastructure.cluster.x-k8s.io_vspheremachinetemplates.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.17.0 + controller-gen.kubebuilder.io/version: v0.18.0 name: vspheremachinetemplates.vmware.infrastructure.cluster.x-k8s.io spec: group: vmware.infrastructure.cluster.x-k8s.io diff --git a/controllers/clustermodule_reconciler.go b/controllers/clustermodule_reconciler.go index a2bed77da1..071c1969e3 100644 --- a/controllers/clustermodule_reconciler.go +++ b/controllers/clustermodule_reconciler.go @@ -24,11 +24,12 @@ import ( "github.com/pkg/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/klog/v2" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1" + controlplanev1 "sigs.k8s.io/cluster-api/api/controlplane/kubeadm/v1beta2" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/cluster-api/util" - "sigs.k8s.io/cluster-api/util/conditions" - v1beta2conditions "sigs.k8s.io/cluster-api/util/conditions/v1beta2" + deprecatedconditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" + deprecatedv1beta2conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions/v1beta2" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller" @@ -68,9 +69,9 @@ func (r Reconciler) Reconcile(ctx context.Context, clusterCtx *capvcontext.Clust log := ctrl.LoggerFrom(ctx) if !clustermodule.IsClusterCompatible(clusterCtx) { - conditions.MarkFalse(clusterCtx.VSphereCluster, infrav1.ClusterModulesAvailableCondition, infrav1.VCenterVersionIncompatibleReason, clusterv1.ConditionSeverityInfo, + deprecatedconditions.MarkFalse(clusterCtx.VSphereCluster, infrav1.ClusterModulesAvailableCondition, infrav1.VCenterVersionIncompatibleReason, clusterv1beta1.ConditionSeverityInfo, "vCenter version %s does not support cluster modules", clusterCtx.VSphereCluster.Status.VCenterVersion) - v1beta2conditions.Set(clusterCtx.VSphereCluster, 
metav1.Condition{ + deprecatedv1beta2conditions.Set(clusterCtx.VSphereCluster, metav1.Condition{ Type: infrav1.VSphereClusterClusterModulesReadyV1Beta2Condition, Status: metav1.ConditionFalse, Reason: infrav1.VSphereClusterModulesInvalidVCenterVersionV1Beta2Reason, @@ -175,9 +176,9 @@ func (r Reconciler) Reconcile(ctx context.Context, clusterCtx *capvcontext.Clust } else { err = errors.New(generateClusterModuleErrorMessage(modErrs)) } - conditions.MarkFalse(clusterCtx.VSphereCluster, infrav1.ClusterModulesAvailableCondition, infrav1.ClusterModuleSetupFailedReason, - clusterv1.ConditionSeverityWarning, generateClusterModuleErrorMessage(modErrs)) - v1beta2conditions.Set(clusterCtx.VSphereCluster, metav1.Condition{ + deprecatedconditions.MarkFalse(clusterCtx.VSphereCluster, infrav1.ClusterModulesAvailableCondition, infrav1.ClusterModuleSetupFailedReason, + clusterv1beta1.ConditionSeverityWarning, "%s", generateClusterModuleErrorMessage(modErrs)) + deprecatedv1beta2conditions.Set(clusterCtx.VSphereCluster, metav1.Condition{ Type: infrav1.VSphereClusterClusterModulesReadyV1Beta2Condition, Status: metav1.ConditionFalse, Reason: infrav1.VSphereClusterClusterModulesNotReadyV1Beta2Reason, @@ -185,11 +186,11 @@ func (r Reconciler) Reconcile(ctx context.Context, clusterCtx *capvcontext.Clust }) return reconcile.Result{}, err case len(modErrs) == 0 && len(clusterModuleSpecs) > 0: - conditions.MarkTrue(clusterCtx.VSphereCluster, infrav1.ClusterModulesAvailableCondition) + deprecatedconditions.MarkTrue(clusterCtx.VSphereCluster, infrav1.ClusterModulesAvailableCondition) default: - conditions.Delete(clusterCtx.VSphereCluster, infrav1.ClusterModulesAvailableCondition) + deprecatedconditions.Delete(clusterCtx.VSphereCluster, infrav1.ClusterModulesAvailableCondition) } - v1beta2conditions.Set(clusterCtx.VSphereCluster, metav1.Condition{ + deprecatedv1beta2conditions.Set(clusterCtx.VSphereCluster, metav1.Condition{ Type: infrav1.VSphereClusterClusterModulesReadyV1Beta2Condition, Status: metav1.ConditionTrue, Reason: infrav1.VSphereClusterClusterModulesReadyV1Beta2Reason, diff --git a/controllers/clustermodule_reconciler_test.go b/controllers/clustermodule_reconciler_test.go index 96c00fa6ce..aba0f29799 100644 --- a/controllers/clustermodule_reconciler_test.go +++ b/controllers/clustermodule_reconciler_test.go @@ -25,9 +25,9 @@ import ( "github.com/pkg/errors" "github.com/stretchr/testify/mock" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1" - "sigs.k8s.io/cluster-api/util/conditions" + controlplanev1 "sigs.k8s.io/cluster-api/api/controlplane/kubeadm/v1beta2" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" + deprecatedconditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" "sigs.k8s.io/controller-runtime/pkg/client" infrav1 "sigs.k8s.io/cluster-api-provider-vsphere/apis/v1beta1" @@ -145,8 +145,8 @@ func TestReconciler_Reconcile(t *testing.T) { g.Expect(clusterCtx.VSphereCluster.Spec.ClusterModules[0].ModuleUUID).To(gomega.BeElementOf(kcpUUID+"a", mdUUID+"a")) g.Expect(clusterCtx.VSphereCluster.Spec.ClusterModules[1].ModuleUUID).To(gomega.BeElementOf(kcpUUID+"a", mdUUID+"a")) // Check that condition got set. 
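The reconciler edits above follow one pattern throughout this patch: the legacy condition helpers move to the util/deprecated/v1beta1 packages, and error messages are passed as arguments to a constant format string instead of being interpolated into it. A minimal sketch under those assumptions (markClusterModulesNotReady is a hypothetical helper, not code from this PR):

package controllers

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
	deprecatedconditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions"
	deprecatedv1beta2conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions/v1beta2"

	infrav1 "sigs.k8s.io/cluster-api-provider-vsphere/apis/v1beta1"
)

// markClusterModulesNotReady records the same failure on both condition flavours.
// Passing err through the "%v" verb keeps the format string constant, so govet's
// printf check passes without a lint exclusion.
func markClusterModulesNotReady(vsphereCluster *infrav1.VSphereCluster, err error) {
	deprecatedconditions.MarkFalse(vsphereCluster, infrav1.ClusterModulesAvailableCondition,
		infrav1.ClusterModuleSetupFailedReason, clusterv1beta1.ConditionSeverityWarning, "%v", err)
	deprecatedv1beta2conditions.Set(vsphereCluster, metav1.Condition{
		Type:    infrav1.VSphereClusterClusterModulesReadyV1Beta2Condition,
		Status:  metav1.ConditionFalse,
		Reason:  infrav1.VSphereClusterClusterModulesNotReadyV1Beta2Reason,
		Message: err.Error(),
	})
}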
- g.Expect(conditions.Has(clusterCtx.VSphereCluster, infrav1.ClusterModulesAvailableCondition)).To(gomega.BeTrue()) - g.Expect(conditions.IsTrue(clusterCtx.VSphereCluster, infrav1.ClusterModulesAvailableCondition)).To(gomega.BeTrue()) + g.Expect(deprecatedconditions.Has(clusterCtx.VSphereCluster, infrav1.ClusterModulesAvailableCondition)).To(gomega.BeTrue()) + g.Expect(deprecatedconditions.IsTrue(clusterCtx.VSphereCluster, infrav1.ClusterModulesAvailableCondition)).To(gomega.BeTrue()) }, }, { @@ -174,9 +174,9 @@ func TestReconciler_Reconcile(t *testing.T) { g.Expect(clusterCtx.VSphereCluster.Spec.ClusterModules[0].ModuleUUID).To(gomega.BeElementOf(kcpUUID, mdUUID)) g.Expect(clusterCtx.VSphereCluster.Spec.ClusterModules[1].ModuleUUID).To(gomega.BeElementOf(kcpUUID, mdUUID)) // Check that condition got set. - g.Expect(conditions.Has(clusterCtx.VSphereCluster, infrav1.ClusterModulesAvailableCondition)).To(gomega.BeTrue()) - g.Expect(conditions.IsFalse(clusterCtx.VSphereCluster, infrav1.ClusterModulesAvailableCondition)).To(gomega.BeTrue()) - g.Expect(conditions.Get(clusterCtx.VSphereCluster, infrav1.ClusterModulesAvailableCondition).Message).To(gomega.ContainSubstring(vCenter500err.Error())) + g.Expect(deprecatedconditions.Has(clusterCtx.VSphereCluster, infrav1.ClusterModulesAvailableCondition)).To(gomega.BeTrue()) + g.Expect(deprecatedconditions.IsFalse(clusterCtx.VSphereCluster, infrav1.ClusterModulesAvailableCondition)).To(gomega.BeTrue()) + g.Expect(deprecatedconditions.Get(clusterCtx.VSphereCluster, infrav1.ClusterModulesAvailableCondition).Message).To(gomega.ContainSubstring(vCenter500err.Error())) }, }, { @@ -204,9 +204,9 @@ func TestReconciler_Reconcile(t *testing.T) { g.Expect(clusterCtx.VSphereCluster.Spec.ClusterModules[0].ModuleUUID).To(gomega.BeElementOf(kcpUUID, mdUUID)) g.Expect(clusterCtx.VSphereCluster.Spec.ClusterModules[1].ModuleUUID).To(gomega.BeElementOf(kcpUUID, mdUUID)) // Check that condition got set. - g.Expect(conditions.Has(clusterCtx.VSphereCluster, infrav1.ClusterModulesAvailableCondition)).To(gomega.BeTrue()) - g.Expect(conditions.IsFalse(clusterCtx.VSphereCluster, infrav1.ClusterModulesAvailableCondition)).To(gomega.BeTrue()) - g.Expect(conditions.Get(clusterCtx.VSphereCluster, infrav1.ClusterModulesAvailableCondition).Message).To(gomega.ContainSubstring(vCenter500err.Error())) + g.Expect(deprecatedconditions.Has(clusterCtx.VSphereCluster, infrav1.ClusterModulesAvailableCondition)).To(gomega.BeTrue()) + g.Expect(deprecatedconditions.IsFalse(clusterCtx.VSphereCluster, infrav1.ClusterModulesAvailableCondition)).To(gomega.BeTrue()) + g.Expect(deprecatedconditions.Get(clusterCtx.VSphereCluster, infrav1.ClusterModulesAvailableCondition).Message).To(gomega.ContainSubstring(vCenter500err.Error())) }, }, { @@ -235,9 +235,9 @@ func TestReconciler_Reconcile(t *testing.T) { g.Expect(clusterCtx.VSphereCluster.Spec.ClusterModules[0].ModuleUUID).To(gomega.BeElementOf(kcpUUID+"a", mdUUID)) g.Expect(clusterCtx.VSphereCluster.Spec.ClusterModules[1].ModuleUUID).To(gomega.BeElementOf(kcpUUID+"a", mdUUID)) // Check that condition got set. 
- g.Expect(conditions.Has(clusterCtx.VSphereCluster, infrav1.ClusterModulesAvailableCondition)).To(gomega.BeTrue()) - g.Expect(conditions.IsFalse(clusterCtx.VSphereCluster, infrav1.ClusterModulesAvailableCondition)).To(gomega.BeTrue()) - g.Expect(conditions.Get(clusterCtx.VSphereCluster, infrav1.ClusterModulesAvailableCondition).Message).To(gomega.ContainSubstring(vCenter500err.Error())) + g.Expect(deprecatedconditions.Has(clusterCtx.VSphereCluster, infrav1.ClusterModulesAvailableCondition)).To(gomega.BeTrue()) + g.Expect(deprecatedconditions.IsFalse(clusterCtx.VSphereCluster, infrav1.ClusterModulesAvailableCondition)).To(gomega.BeTrue()) + g.Expect(deprecatedconditions.Get(clusterCtx.VSphereCluster, infrav1.ClusterModulesAvailableCondition).Message).To(gomega.ContainSubstring(vCenter500err.Error())) }, }, { @@ -253,9 +253,9 @@ func TestReconciler_Reconcile(t *testing.T) { g.Expect(clusterCtx.VSphereCluster.Spec.ClusterModules[0].ModuleUUID).To(gomega.Equal(mdUUID)) g.Expect(clusterCtx.VSphereCluster.Spec.ClusterModules[0].ControlPlane).To(gomega.BeFalse()) - g.Expect(conditions.Has(clusterCtx.VSphereCluster, infrav1.ClusterModulesAvailableCondition)).To(gomega.BeTrue()) - g.Expect(conditions.IsFalse(clusterCtx.VSphereCluster, infrav1.ClusterModulesAvailableCondition)).To(gomega.BeTrue()) - g.Expect(conditions.Get(clusterCtx.VSphereCluster, infrav1.ClusterModulesAvailableCondition).Message).To(gomega.ContainSubstring("kcp")) + g.Expect(deprecatedconditions.Has(clusterCtx.VSphereCluster, infrav1.ClusterModulesAvailableCondition)).To(gomega.BeTrue()) + g.Expect(deprecatedconditions.IsFalse(clusterCtx.VSphereCluster, infrav1.ClusterModulesAvailableCondition)).To(gomega.BeTrue()) + g.Expect(deprecatedconditions.Get(clusterCtx.VSphereCluster, infrav1.ClusterModulesAvailableCondition).Message).To(gomega.ContainSubstring("kcp")) }, }, { @@ -273,9 +273,9 @@ func TestReconciler_Reconcile(t *testing.T) { g.Expect(clusterCtx.VSphereCluster.Spec.ClusterModules[0].ModuleUUID).To(gomega.Equal(kcpUUID)) g.Expect(clusterCtx.VSphereCluster.Spec.ClusterModules[0].ControlPlane).To(gomega.BeTrue()) - g.Expect(conditions.Has(clusterCtx.VSphereCluster, infrav1.ClusterModulesAvailableCondition)).To(gomega.BeTrue()) - g.Expect(conditions.IsFalse(clusterCtx.VSphereCluster, infrav1.ClusterModulesAvailableCondition)).To(gomega.BeTrue()) - g.Expect(conditions.Get(clusterCtx.VSphereCluster, infrav1.ClusterModulesAvailableCondition).Message).To(gomega.ContainSubstring("md")) + g.Expect(deprecatedconditions.Has(clusterCtx.VSphereCluster, infrav1.ClusterModulesAvailableCondition)).To(gomega.BeTrue()) + g.Expect(deprecatedconditions.IsFalse(clusterCtx.VSphereCluster, infrav1.ClusterModulesAvailableCondition)).To(gomega.BeTrue()) + g.Expect(deprecatedconditions.Get(clusterCtx.VSphereCluster, infrav1.ClusterModulesAvailableCondition).Message).To(gomega.ContainSubstring("md")) }, }, { @@ -289,9 +289,9 @@ func TestReconciler_Reconcile(t *testing.T) { haveError: false, customAssert: func(g *gomega.WithT, clusterCtx *capvcontext.ClusterContext) { g.Expect(clusterCtx.VSphereCluster.Spec.ClusterModules).To(gomega.BeEmpty()) - g.Expect(conditions.Has(clusterCtx.VSphereCluster, infrav1.ClusterModulesAvailableCondition)).To(gomega.BeTrue()) - g.Expect(conditions.IsFalse(clusterCtx.VSphereCluster, infrav1.ClusterModulesAvailableCondition)).To(gomega.BeTrue()) - g.Expect(conditions.Get(clusterCtx.VSphereCluster, infrav1.ClusterModulesAvailableCondition).Message).To(gomega.ContainSubstring("kcp")) + 
g.Expect(deprecatedconditions.Has(clusterCtx.VSphereCluster, infrav1.ClusterModulesAvailableCondition)).To(gomega.BeTrue()) + g.Expect(deprecatedconditions.IsFalse(clusterCtx.VSphereCluster, infrav1.ClusterModulesAvailableCondition)).To(gomega.BeTrue()) + g.Expect(deprecatedconditions.Get(clusterCtx.VSphereCluster, infrav1.ClusterModulesAvailableCondition).Message).To(gomega.ContainSubstring("kcp")) }, }, { diff --git a/controllers/controllers_suite_test.go b/controllers/controllers_suite_test.go index c8a5b132b6..baaaf80e81 100644 --- a/controllers/controllers_suite_test.go +++ b/controllers/controllers_suite_test.go @@ -30,7 +30,7 @@ import ( utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/client-go/kubernetes/scheme" "k8s.io/utils/ptr" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/cluster-api/controllers/clustercache" "sigs.k8s.io/cluster-api/controllers/remote" ctrl "sigs.k8s.io/controller-runtime" diff --git a/controllers/vmware/controllers_suite_test.go b/controllers/vmware/controllers_suite_test.go index 330e1518eb..87d99112e0 100644 --- a/controllers/vmware/controllers_suite_test.go +++ b/controllers/vmware/controllers_suite_test.go @@ -31,7 +31,7 @@ import ( utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/client-go/kubernetes/scheme" "k8s.io/utils/ptr" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/cluster-api/controllers/clustercache" "sigs.k8s.io/cluster-api/controllers/remote" ctrl "sigs.k8s.io/controller-runtime" diff --git a/controllers/vmware/serviceaccount_controller.go b/controllers/vmware/serviceaccount_controller.go index 05f5d06d99..ee64ae9391 100644 --- a/controllers/vmware/serviceaccount_controller.go +++ b/controllers/vmware/serviceaccount_controller.go @@ -31,13 +31,14 @@ import ( kerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/client-go/tools/record" "k8s.io/klog/v2" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/cluster-api/controllers/clustercache" clusterutilv1 "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/annotations" - "sigs.k8s.io/cluster-api/util/conditions" - v1beta2conditions "sigs.k8s.io/cluster-api/util/conditions/v1beta2" - "sigs.k8s.io/cluster-api/util/patch" + deprecatedconditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" + deprecatedv1beta2conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions/v1beta2" + "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/patch" "sigs.k8s.io/cluster-api/util/predicates" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" @@ -193,7 +194,7 @@ func (r *ServiceAccountReconciler) Reconcile(ctx context.Context, req reconcile. func (r *ServiceAccountReconciler) patch(ctx context.Context, clusterCtx *vmwarecontext.ClusterContext) error { // NOTE: this controller only owns the ProviderServiceAccountsReady condition on the VSphereCluster object. 
return clusterCtx.PatchHelper.Patch(ctx, clusterCtx.VSphereCluster, - patch.WithOwnedConditions{Conditions: []clusterv1.ConditionType{ + patch.WithOwnedConditions{Conditions: []clusterv1beta1.ConditionType{ vmwarev1.ProviderServiceAccountsReadyCondition, }}, patch.WithOwnedV1Beta2Conditions{Conditions: []string{ @@ -206,17 +207,17 @@ func (r *ServiceAccountReconciler) patch(ctx context.Context, clusterCtx *vmware func (r *ServiceAccountReconciler) reconcileNormal(ctx context.Context, guestClusterCtx *vmwarecontext.GuestClusterContext) (_ reconcile.Result, reterr error) { defer func() { if reterr != nil { - conditions.MarkFalse(guestClusterCtx.VSphereCluster, vmwarev1.ProviderServiceAccountsReadyCondition, vmwarev1.ProviderServiceAccountsReconciliationFailedReason, - clusterv1.ConditionSeverityWarning, reterr.Error()) - v1beta2conditions.Set(guestClusterCtx.VSphereCluster, metav1.Condition{ + deprecatedconditions.MarkFalse(guestClusterCtx.VSphereCluster, vmwarev1.ProviderServiceAccountsReadyCondition, vmwarev1.ProviderServiceAccountsReconciliationFailedReason, + clusterv1beta1.ConditionSeverityWarning, "%v", reterr) + deprecatedv1beta2conditions.Set(guestClusterCtx.VSphereCluster, metav1.Condition{ Type: vmwarev1.VSphereClusterProviderServiceAccountsReadyV1Beta2Condition, Status: metav1.ConditionFalse, Reason: vmwarev1.VSphereClusterProviderServiceAccountsNotReadyV1Beta2Reason, Message: reterr.Error(), }) } else { - conditions.MarkTrue(guestClusterCtx.VSphereCluster, vmwarev1.ProviderServiceAccountsReadyCondition) - v1beta2conditions.Set(guestClusterCtx.VSphereCluster, metav1.Condition{ + deprecatedconditions.MarkTrue(guestClusterCtx.VSphereCluster, vmwarev1.ProviderServiceAccountsReadyCondition) + deprecatedv1beta2conditions.Set(guestClusterCtx.VSphereCluster, metav1.Condition{ Type: vmwarev1.VSphereClusterProviderServiceAccountsReadyV1Beta2Condition, Status: metav1.ConditionTrue, Reason: vmwarev1.VSphereClusterProviderServiceAccountsReadyV1Beta2Reason, diff --git a/controllers/vmware/serviceaccount_controller_intg_test.go b/controllers/vmware/serviceaccount_controller_intg_test.go index 54ffe915a0..33279b7e7f 100644 --- a/controllers/vmware/serviceaccount_controller_intg_test.go +++ b/controllers/vmware/serviceaccount_controller_intg_test.go @@ -32,8 +32,8 @@ import ( "k8s.io/apimachinery/pkg/types" "k8s.io/utils/ptr" "sigs.k8s.io/cluster-api/controllers/clustercache" - "sigs.k8s.io/cluster-api/util/conditions" - "sigs.k8s.io/cluster-api/util/patch" + deprecatedconditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" + "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/patch" "sigs.k8s.io/controller-runtime/pkg/client" vmwarev1 "sigs.k8s.io/cluster-api-provider-vsphere/apis/vmware/v1beta1" @@ -77,7 +77,7 @@ var _ = Describe("ProviderServiceAccount controller integration tests", func() { vmwarehelpers.CreateAndWait(ctx, intCtx.Client, intCtx.Cluster) vmwarehelpers.CreateAndWait(ctx, intCtx.Client, intCtx.VSphereCluster) vmwarehelpers.CreateAndWait(ctx, intCtx.Client, intCtx.KubeconfigSecret) - vmwarehelpers.ClusterInfrastructureReady(ctx, intCtx.Client, clusterCache, intCtx.Cluster) + vmwarehelpers.ClusterInfrastructureProvisioned(ctx, intCtx.Client, clusterCache, intCtx.Cluster) }) By("Verifying that the guest cluster client works") @@ -174,7 +174,7 @@ var _ = Describe("ProviderServiceAccount controller integration tests", func() { vsphereCluster := &vmwarev1.VSphereCluster{} key := client.ObjectKey{Namespace: intCtx.Namespace, Name: intCtx.VSphereCluster.GetName()} 
Expect(intCtx.Client.Get(ctx, key, vsphereCluster)).To(Succeed()) - Expect(conditions.Has(vsphereCluster, vmwarev1.ProviderServiceAccountsReadyCondition)).To(BeFalse()) + Expect(deprecatedconditions.Has(vsphereCluster, vmwarev1.ProviderServiceAccountsReadyCondition)).To(BeFalse()) }) }) }) @@ -196,7 +196,7 @@ var _ = Describe("ProviderServiceAccount controller integration tests", func() { vsphereCluster := &vmwarev1.VSphereCluster{} key := client.ObjectKey{Namespace: intCtx.Namespace, Name: intCtx.VSphereCluster.GetName()} Expect(intCtx.Client.Get(ctx, key, vsphereCluster)).To(Succeed()) - Expect(conditions.Has(vsphereCluster, vmwarev1.ProviderServiceAccountsReadyCondition)).To(BeFalse()) + Expect(deprecatedconditions.Has(vsphereCluster, vmwarev1.ProviderServiceAccountsReadyCondition)).To(BeFalse()) }) }) }) @@ -210,7 +210,7 @@ var _ = Describe("ProviderServiceAccount controller integration tests", func() { vmwarehelpers.CreateAndWait(ctx, intCtx.Client, intCtx.Cluster) vmwarehelpers.CreateAndWait(ctx, intCtx.Client, intCtx.VSphereCluster) vmwarehelpers.CreateAndWait(ctx, intCtx.Client, intCtx.KubeconfigSecret) - vmwarehelpers.ClusterInfrastructureReady(ctx, intCtx.Client, clusterCache, intCtx.Cluster) + vmwarehelpers.ClusterInfrastructureProvisioned(ctx, intCtx.Client, clusterCache, intCtx.Cluster) }) pSvcAccount = getTestProviderServiceAccount(intCtx.Namespace, intCtx.VSphereCluster) pSvcAccount.Spec.TargetNamespace = "default" diff --git a/controllers/vmware/serviceaccount_controller_suite_test.go b/controllers/vmware/serviceaccount_controller_suite_test.go index 072e3b8d28..87769e2127 100644 --- a/controllers/vmware/serviceaccount_controller_suite_test.go +++ b/controllers/vmware/serviceaccount_controller_suite_test.go @@ -27,8 +27,8 @@ import ( rbacv1 "k8s.io/api/rbac/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - "sigs.k8s.io/cluster-api/util/conditions" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + deprecatedconditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" "sigs.k8s.io/controller-runtime/pkg/client" vmwarev1 "sigs.k8s.io/cluster-api-provider-vsphere/apis/vmware/v1beta1" @@ -154,8 +154,8 @@ func assertRoleBinding(ctx context.Context, ctrlClient client.Client, namespace, } // assertProviderServiceAccountsCondition asserts the condition on the ProviderServiceAccount CR. 
-func assertProviderServiceAccountsCondition(vCluster *vmwarev1.VSphereCluster, status corev1.ConditionStatus, message string, reason string, severity clusterv1.ConditionSeverity) { - c := conditions.Get(vCluster, vmwarev1.ProviderServiceAccountsReadyCondition) +func assertProviderServiceAccountsCondition(vCluster *vmwarev1.VSphereCluster, status corev1.ConditionStatus, message string, reason string, severity clusterv1beta1.ConditionSeverity) { + c := deprecatedconditions.Get(vCluster, vmwarev1.ProviderServiceAccountsReadyCondition) Expect(c).NotTo(BeNil()) Expect(c.Status).To(Equal(status)) Expect(c.Reason).To(Equal(reason)) diff --git a/controllers/vmware/servicediscovery_controller.go b/controllers/vmware/servicediscovery_controller.go index 5c8d2d5e0a..50bbf316a2 100644 --- a/controllers/vmware/servicediscovery_controller.go +++ b/controllers/vmware/servicediscovery_controller.go @@ -36,13 +36,14 @@ import ( "k8s.io/client-go/tools/record" bootstrapapi "k8s.io/cluster-bootstrap/token/api" "k8s.io/klog/v2" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/cluster-api/controllers/clustercache" clusterutilv1 "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/annotations" - "sigs.k8s.io/cluster-api/util/conditions" - v1beta2conditions "sigs.k8s.io/cluster-api/util/conditions/v1beta2" - "sigs.k8s.io/cluster-api/util/patch" + deprecatedconditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" + deprecatedv1beta2conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions/v1beta2" + "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/patch" "sigs.k8s.io/cluster-api/util/predicates" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/cache" @@ -226,7 +227,7 @@ func (r *serviceDiscoveryReconciler) Reconcile(ctx context.Context, req reconcil func (r *serviceDiscoveryReconciler) patch(ctx context.Context, clusterCtx *vmwarecontext.ClusterContext) error { // NOTE: this controller only owns the ServiceDiscoveryReady condition on the VSphereCluster object. 
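The patch() helpers in these controllers all funnel through the relocated patch utility. A compact sketch of the call shape, assuming the relocated package keeps the familiar Helper type and that a *vmwarev1.VSphereCluster and its helper are already in hand (patchVSphereCluster is a hypothetical name):

package vmware

import (
	"context"

	clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
	"sigs.k8s.io/cluster-api/util/deprecated/v1beta1/patch"

	vmwarev1 "sigs.k8s.io/cluster-api-provider-vsphere/apis/vmware/v1beta1"
)

// patchVSphereCluster illustrates the owned-conditions declaration: legacy
// condition types go through WithOwnedConditions, v1beta2 condition names
// (plain strings) through WithOwnedV1Beta2Conditions.
func patchVSphereCluster(ctx context.Context, helper *patch.Helper, vsphereCluster *vmwarev1.VSphereCluster) error {
	return helper.Patch(ctx, vsphereCluster,
		patch.WithOwnedConditions{Conditions: []clusterv1beta1.ConditionType{
			vmwarev1.ServiceDiscoveryReadyCondition,
		}},
		patch.WithOwnedV1Beta2Conditions{Conditions: []string{
			vmwarev1.VSphereClusterServiceDiscoveryReadyV1Beta2Condition,
		}},
	)
}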
return clusterCtx.PatchHelper.Patch(ctx, clusterCtx.VSphereCluster, - patch.WithOwnedConditions{Conditions: []clusterv1.ConditionType{ + patch.WithOwnedConditions{Conditions: []clusterv1beta1.ConditionType{ vmwarev1.ServiceDiscoveryReadyCondition, }}, patch.WithOwnedV1Beta2Conditions{Conditions: []string{ @@ -237,9 +238,9 @@ func (r *serviceDiscoveryReconciler) patch(ctx context.Context, clusterCtx *vmwa func (r *serviceDiscoveryReconciler) reconcileNormal(ctx context.Context, guestClusterCtx *vmwarecontext.GuestClusterContext) error { if err := r.reconcileSupervisorHeadlessService(ctx, guestClusterCtx); err != nil { - conditions.MarkFalse(guestClusterCtx.VSphereCluster, vmwarev1.ServiceDiscoveryReadyCondition, vmwarev1.SupervisorHeadlessServiceSetupFailedReason, - clusterv1.ConditionSeverityWarning, err.Error()) - v1beta2conditions.Set(guestClusterCtx.VSphereCluster, metav1.Condition{ + deprecatedconditions.MarkFalse(guestClusterCtx.VSphereCluster, vmwarev1.ServiceDiscoveryReadyCondition, vmwarev1.SupervisorHeadlessServiceSetupFailedReason, + clusterv1beta1.ConditionSeverityWarning, "%v", err) + deprecatedv1beta2conditions.Set(guestClusterCtx.VSphereCluster, metav1.Condition{ Type: vmwarev1.VSphereClusterServiceDiscoveryReadyV1Beta2Condition, Status: metav1.ConditionFalse, Reason: vmwarev1.VSphereClusterServiceDiscoveryNotReadyV1Beta2Reason, @@ -281,9 +282,9 @@ func (r *serviceDiscoveryReconciler) reconcileSupervisorHeadlessService(ctx cont if err != nil { // Note: We have watches on the LB Svc (VIP) & the cluster-info configmap (FIP). // There is no need to return an error to keep re-trying. - conditions.MarkFalse(guestClusterCtx.VSphereCluster, vmwarev1.ServiceDiscoveryReadyCondition, vmwarev1.SupervisorHeadlessServiceSetupFailedReason, - clusterv1.ConditionSeverityWarning, err.Error()) - v1beta2conditions.Set(guestClusterCtx.VSphereCluster, metav1.Condition{ + deprecatedconditions.MarkFalse(guestClusterCtx.VSphereCluster, vmwarev1.ServiceDiscoveryReadyCondition, vmwarev1.SupervisorHeadlessServiceSetupFailedReason, + clusterv1beta1.ConditionSeverityWarning, "%v", err) + deprecatedv1beta2conditions.Set(guestClusterCtx.VSphereCluster, metav1.Condition{ Type: vmwarev1.VSphereClusterServiceDiscoveryReadyV1Beta2Condition, Status: metav1.ConditionFalse, Reason: vmwarev1.VSphereClusterServiceDiscoveryNotReadyV1Beta2Reason, @@ -337,8 +338,8 @@ func (r *serviceDiscoveryReconciler) reconcileSupervisorHeadlessService(ctx cont log.Error(nil, "Unexpected result during createOrPatch service Endpoints", "endpointsSubsets", endpointsSubsetsStr, "operationResult", result) } - conditions.MarkTrue(guestClusterCtx.VSphereCluster, vmwarev1.ServiceDiscoveryReadyCondition) - v1beta2conditions.Set(guestClusterCtx.VSphereCluster, metav1.Condition{ + deprecatedconditions.MarkTrue(guestClusterCtx.VSphereCluster, vmwarev1.ServiceDiscoveryReadyCondition) + deprecatedv1beta2conditions.Set(guestClusterCtx.VSphereCluster, metav1.Condition{ Type: vmwarev1.VSphereClusterServiceDiscoveryReadyV1Beta2Condition, Status: metav1.ConditionTrue, Reason: vmwarev1.VSphereClusterServiceDiscoveryReadyV1Beta2Reason, diff --git a/controllers/vmware/servicediscovery_controller_intg_test.go b/controllers/vmware/servicediscovery_controller_intg_test.go index 24cf32f1f7..31f4bba1a9 100644 --- a/controllers/vmware/servicediscovery_controller_intg_test.go +++ b/controllers/vmware/servicediscovery_controller_intg_test.go @@ -53,7 +53,7 @@ var _ = Describe("Service Discovery controller integration tests", func() { 
vmwarehelpers.CreateAndWait(ctx, intCtx.Client, intCtx.Cluster) vmwarehelpers.CreateAndWait(ctx, intCtx.Client, intCtx.VSphereCluster) vmwarehelpers.CreateAndWait(ctx, intCtx.Client, intCtx.KubeconfigSecret) - vmwarehelpers.ClusterInfrastructureReady(ctx, intCtx.Client, clusterCache, intCtx.Cluster) + vmwarehelpers.ClusterInfrastructureProvisioned(ctx, intCtx.Client, clusterCache, intCtx.Cluster) }) By("Verifying that the guest cluster client works") diff --git a/controllers/vmware/servicediscovery_controller_suite_test.go b/controllers/vmware/servicediscovery_controller_suite_test.go index 9e22abf813..d27efd58a1 100644 --- a/controllers/vmware/servicediscovery_controller_suite_test.go +++ b/controllers/vmware/servicediscovery_controller_suite_test.go @@ -28,8 +28,8 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" bootstrapapi "k8s.io/cluster-bootstrap/token/api" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - "sigs.k8s.io/cluster-api/util/conditions" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + deprecatedconditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" "sigs.k8s.io/controller-runtime/pkg/client" vmwarev1 "sigs.k8s.io/cluster-api-provider-vsphere/apis/vmware/v1beta1" @@ -121,8 +121,8 @@ func assertHeadlessSvcWithFIPHostNameEndpoints(ctx context.Context, guestClient } func assertServiceDiscoveryCondition(vsphereCluster *vmwarev1.VSphereCluster, status corev1.ConditionStatus, - message string, reason string, severity clusterv1.ConditionSeverity) { - c := conditions.Get(vsphereCluster, vmwarev1.ServiceDiscoveryReadyCondition) + message string, reason string, severity clusterv1beta1.ConditionSeverity) { + c := deprecatedconditions.Get(vsphereCluster, vmwarev1.ServiceDiscoveryReadyCondition) Expect(c).NotTo(BeNil()) if message == "" { Expect(c.Message).To(BeEmpty()) diff --git a/controllers/vmware/servicediscovery_controller_unit_test.go b/controllers/vmware/servicediscovery_controller_unit_test.go index 5a2a6dc33a..fc8e25171d 100644 --- a/controllers/vmware/servicediscovery_controller_unit_test.go +++ b/controllers/vmware/servicediscovery_controller_unit_test.go @@ -21,7 +21,7 @@ import ( . 
"github.com/onsi/gomega" corev1 "k8s.io/api/core/v1" bootstrapapi "k8s.io/cluster-bootstrap/token/api" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" capiutil "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/controller-runtime/pkg/client" @@ -57,7 +57,7 @@ func serviceDiscoveryUnitTestsReconcileNormal() { By("creating a service and no endpoint in the guest cluster") assertHeadlessSvcWithNoEndpoints(ctx, controllerCtx.GuestClient, supervisorHeadlessSvcNamespace, supervisorHeadlessSvcName) assertServiceDiscoveryCondition(controllerCtx.VSphereCluster, corev1.ConditionFalse, "Failed to discover supervisor API server endpoint", - vmwarev1.SupervisorHeadlessServiceSetupFailedReason, clusterv1.ConditionSeverityWarning) + vmwarev1.SupervisorHeadlessServiceSetupFailedReason, clusterv1beta1.ConditionSeverityWarning) }) }) Context("When VIP is available", func() { @@ -138,7 +138,7 @@ func serviceDiscoveryUnitTestsReconcileNormal() { By("creating a service and no endpoint in the guest cluster") assertHeadlessSvcWithNoEndpoints(ctx, controllerCtx.GuestClient, supervisorHeadlessSvcNamespace, supervisorHeadlessSvcName) assertServiceDiscoveryCondition(controllerCtx.VSphereCluster, corev1.ConditionFalse, "Failed to discover supervisor API server endpoint", - vmwarev1.SupervisorHeadlessServiceSetupFailedReason, clusterv1.ConditionSeverityWarning) + vmwarev1.SupervisorHeadlessServiceSetupFailedReason, clusterv1beta1.ConditionSeverityWarning) }) }) Context("When FIP is an invalid host", func() { @@ -151,7 +151,7 @@ func serviceDiscoveryUnitTestsReconcileNormal() { By("creating a service and no endpoint in the guest cluster") assertHeadlessSvcWithNoEndpoints(ctx, controllerCtx.GuestClient, supervisorHeadlessSvcNamespace, supervisorHeadlessSvcName) assertServiceDiscoveryCondition(controllerCtx.VSphereCluster, corev1.ConditionFalse, "Failed to discover supervisor API server endpoint", - vmwarev1.SupervisorHeadlessServiceSetupFailedReason, clusterv1.ConditionSeverityWarning) + vmwarev1.SupervisorHeadlessServiceSetupFailedReason, clusterv1beta1.ConditionSeverityWarning) }) }) Context("When FIP config map has invalid kubeconfig data", func() { @@ -167,7 +167,7 @@ func serviceDiscoveryUnitTestsReconcileNormal() { By("creating a service and no endpoint in the guest cluster") assertHeadlessSvcWithNoEndpoints(ctx, controllerCtx.GuestClient, supervisorHeadlessSvcNamespace, supervisorHeadlessSvcName) assertServiceDiscoveryCondition(controllerCtx.VSphereCluster, corev1.ConditionFalse, "Failed to discover supervisor API server endpoint", - vmwarev1.SupervisorHeadlessServiceSetupFailedReason, clusterv1.ConditionSeverityWarning) + vmwarev1.SupervisorHeadlessServiceSetupFailedReason, clusterv1beta1.ConditionSeverityWarning) }) }) Context("When FIP config map has invalid kubeconfig key", func() { @@ -183,7 +183,7 @@ func serviceDiscoveryUnitTestsReconcileNormal() { By("creating a service and no endpoint in the guest cluster") assertHeadlessSvcWithNoEndpoints(ctx, controllerCtx.GuestClient, supervisorHeadlessSvcNamespace, supervisorHeadlessSvcName) assertServiceDiscoveryCondition(controllerCtx.VSphereCluster, corev1.ConditionFalse, "Failed to discover supervisor API server endpoint", - vmwarev1.SupervisorHeadlessServiceSetupFailedReason, clusterv1.ConditionSeverityWarning) + vmwarev1.SupervisorHeadlessServiceSetupFailedReason, clusterv1beta1.ConditionSeverityWarning) }) }) } diff --git a/controllers/vmware/test/controllers_suite_test.go 
b/controllers/vmware/test/controllers_suite_test.go index e018d2d0e6..d11977c8f0 100644 --- a/controllers/vmware/test/controllers_suite_test.go +++ b/controllers/vmware/test/controllers_suite_test.go @@ -34,7 +34,7 @@ import ( "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/rest" "k8s.io/klog/v2" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/envtest" logf "sigs.k8s.io/controller-runtime/pkg/log" @@ -110,14 +110,14 @@ var _ = AfterSuite(func() { }) func findModuleDir(module string) string { - cmd := exec.Command("go", "mod", "download", "-json", module) + cmd := exec.Command("go", "list", "-json", "-m", module) out, err := cmd.Output() if err != nil { - klog.Fatalf("Failed to run go mod to find module %q directory", module) + klog.Fatalf("Failed to run go list to find module %q directory", module) } info := struct{ Dir string }{} if err := json.Unmarshal(out, &info); err != nil { - klog.Fatalf("Failed to unmarshal output from go mod command: %v", err) + klog.Fatalf("Failed to unmarshal output from go list command: %v", err) } else if info.Dir == "" { klog.Fatalf("Failed to find go module %q directory, received %v", module, string(out)) } diff --git a/controllers/vmware/test/controllers_test.go b/controllers/vmware/test/controllers_test.go index b482329c44..79dac1362f 100644 --- a/controllers/vmware/test/controllers_test.go +++ b/controllers/vmware/test/controllers_test.go @@ -34,7 +34,7 @@ import ( "k8s.io/client-go/rest" "k8s.io/klog/v2" "k8s.io/utils/ptr" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/controller-runtime/pkg/cache" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/config" diff --git a/controllers/vmware/vspherecluster_reconciler.go b/controllers/vmware/vspherecluster_reconciler.go index 549ff90b9c..37b4006a29 100644 --- a/controllers/vmware/vspherecluster_reconciler.go +++ b/controllers/vmware/vspherecluster_reconciler.go @@ -28,14 +28,14 @@ import ( kerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/client-go/tools/record" "k8s.io/klog/v2" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" clusterutilv1 "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/collections" - "sigs.k8s.io/cluster-api/util/conditions" - v1beta2conditions "sigs.k8s.io/cluster-api/util/conditions/v1beta2" + deprecatedconditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" + deprecatedv1beta2conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions/v1beta2" + "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/patch" + "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/paused" "sigs.k8s.io/cluster-api/util/finalizers" - "sigs.k8s.io/cluster-api/util/patch" - "sigs.k8s.io/cluster-api/util/paused" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" @@ -141,16 +141,16 @@ func (r *ClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request) (_ func (r *ClusterReconciler) patch(ctx context.Context, clusterCtx *vmware.ClusterContext) error { // always update the readyCondition. 
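The findModuleDir change in controllers_suite_test.go above swaps "go mod download -json" for "go list -json -m", which prints a module's metadata, including its local Dir, as JSON. A minimal standalone sketch of that lookup, using a hypothetical moduleDir helper outside the test suite:

package main

import (
	"encoding/json"
	"fmt"
	"os/exec"
)

// moduleDir resolves the on-disk directory of a module recorded in go.mod by
// parsing the JSON printed by `go list -json -m <module>`, mirroring the
// updated findModuleDir helper in the hunk above.
func moduleDir(module string) (string, error) {
	out, err := exec.Command("go", "list", "-json", "-m", module).Output()
	if err != nil {
		return "", fmt.Errorf("go list failed for %q: %w", module, err)
	}
	var info struct{ Dir string }
	if err := json.Unmarshal(out, &info); err != nil {
		return "", fmt.Errorf("unmarshalling go list output: %w", err)
	}
	if info.Dir == "" {
		return "", fmt.Errorf("no local directory reported for module %q", module)
	}
	return info.Dir, nil
}

func main() {
	dir, err := moduleDir("sigs.k8s.io/controller-runtime")
	if err != nil {
		panic(err)
	}
	fmt.Println(dir)
}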
- conditions.SetSummary(clusterCtx.VSphereCluster, - conditions.WithConditions( + deprecatedconditions.SetSummary(clusterCtx.VSphereCluster, + deprecatedconditions.WithConditions( vmwarev1.ResourcePolicyReadyCondition, vmwarev1.ClusterNetworkReadyCondition, vmwarev1.LoadBalancerReadyCondition, ), ) - if err := v1beta2conditions.SetSummaryCondition(clusterCtx.VSphereCluster, clusterCtx.VSphereCluster, vmwarev1.VSphereClusterReadyV1Beta2Condition, - v1beta2conditions.ForConditionTypes{ + if err := deprecatedv1beta2conditions.SetSummaryCondition(clusterCtx.VSphereCluster, clusterCtx.VSphereCluster, vmwarev1.VSphereClusterReadyV1Beta2Condition, + deprecatedv1beta2conditions.ForConditionTypes{ vmwarev1.VSphereClusterResourcePolicyReadyV1Beta2Condition, vmwarev1.VSphereClusterNetworkReadyV1Beta2Condition, vmwarev1.VSphereClusterLoadBalancerReadyV1Beta2Condition, @@ -159,15 +159,15 @@ func (r *ClusterReconciler) patch(ctx context.Context, clusterCtx *vmware.Cluste vmwarev1.VSphereClusterProviderServiceAccountsReadyV1Beta2Condition, vmwarev1.VSphereClusterServiceDiscoveryReadyV1Beta2Condition, }, - v1beta2conditions.IgnoreTypesIfMissing{ + deprecatedv1beta2conditions.IgnoreTypesIfMissing{ vmwarev1.VSphereClusterProviderServiceAccountsReadyV1Beta2Condition, vmwarev1.VSphereClusterServiceDiscoveryReadyV1Beta2Condition, }, // Using a custom merge strategy to override reasons applied during merge. - v1beta2conditions.CustomMergeStrategy{ - MergeStrategy: v1beta2conditions.DefaultMergeStrategy( + deprecatedv1beta2conditions.CustomMergeStrategy{ + MergeStrategy: deprecatedv1beta2conditions.DefaultMergeStrategy( // Use custom reasons. - v1beta2conditions.ComputeReasonFunc(v1beta2conditions.GetDefaultComputeMergeReasonFunc( + deprecatedv1beta2conditions.ComputeReasonFunc(deprecatedv1beta2conditions.GetDefaultComputeMergeReasonFunc( vmwarev1.VSphereClusterNotReadyV1Beta2Reason, vmwarev1.VSphereClusterReadyUnknownV1Beta2Reason, vmwarev1.VSphereClusterReadyV1Beta2Reason, @@ -179,13 +179,13 @@ func (r *ClusterReconciler) patch(ctx context.Context, clusterCtx *vmware.Cluste } return clusterCtx.PatchHelper.Patch(ctx, clusterCtx.VSphereCluster, - patch.WithOwnedConditions{Conditions: []clusterv1.ConditionType{ + patch.WithOwnedConditions{Conditions: []clusterv1beta1.ConditionType{ vmwarev1.ResourcePolicyReadyCondition, vmwarev1.ClusterNetworkReadyCondition, vmwarev1.LoadBalancerReadyCondition, }}, patch.WithOwnedV1Beta2Conditions{Conditions: []string{ - clusterv1.PausedV1Beta2Condition, + clusterv1beta1.PausedV1Beta2Condition, vmwarev1.VSphereClusterReadyV1Beta2Condition, vmwarev1.VSphereClusterResourcePolicyReadyV1Beta2Condition, vmwarev1.VSphereClusterNetworkReadyV1Beta2Condition, @@ -196,31 +196,31 @@ func (r *ClusterReconciler) patch(ctx context.Context, clusterCtx *vmware.Cluste } func (r *ClusterReconciler) reconcileDelete(clusterCtx *vmware.ClusterContext) { - deletingConditionTypes := []clusterv1.ConditionType{ + deletingConditionTypes := []clusterv1beta1.ConditionType{ vmwarev1.ResourcePolicyReadyCondition, vmwarev1.ClusterNetworkReadyCondition, vmwarev1.LoadBalancerReadyCondition, } for _, t := range deletingConditionTypes { - if c := conditions.Get(clusterCtx.VSphereCluster, t); c != nil { - conditions.MarkFalse(clusterCtx.VSphereCluster, t, clusterv1.DeletingReason, clusterv1.ConditionSeverityInfo, "") + if c := deprecatedconditions.Get(clusterCtx.VSphereCluster, t); c != nil { + deprecatedconditions.MarkFalse(clusterCtx.VSphereCluster, t, clusterv1beta1.DeletingReason, 
clusterv1beta1.ConditionSeverityInfo, "") } } - v1beta2conditions.Set(clusterCtx.VSphereCluster, metav1.Condition{ + deprecatedv1beta2conditions.Set(clusterCtx.VSphereCluster, metav1.Condition{ Type: vmwarev1.VSphereClusterResourcePolicyReadyV1Beta2Condition, Status: metav1.ConditionFalse, Reason: vmwarev1.VSphereClusterResourcePolicyReadyDeletingV1Beta2Reason, }) - v1beta2conditions.Set(clusterCtx.VSphereCluster, metav1.Condition{ + deprecatedv1beta2conditions.Set(clusterCtx.VSphereCluster, metav1.Condition{ Type: vmwarev1.VSphereClusterNetworkReadyV1Beta2Condition, Status: metav1.ConditionFalse, Reason: vmwarev1.VSphereClusterNetworkReadyDeletingV1Beta2Reason, }) - v1beta2conditions.Set(clusterCtx.VSphereCluster, metav1.Condition{ + deprecatedv1beta2conditions.Set(clusterCtx.VSphereCluster, metav1.Condition{ Type: vmwarev1.VSphereClusterLoadBalancerReadyV1Beta2Condition, Status: metav1.ConditionFalse, Reason: vmwarev1.VSphereClusterLoadBalancerDeletingV1Beta2Reason, @@ -245,8 +245,8 @@ func (r *ClusterReconciler) reconcileNormal(ctx context.Context, clusterCtx *vmw // Reconciling the ResourcePolicy early potentially saves us the extra relocate operation. resourcePolicyName, err := r.ResourcePolicyService.ReconcileResourcePolicy(ctx, clusterCtx) if err != nil { - conditions.MarkFalse(clusterCtx.VSphereCluster, vmwarev1.ResourcePolicyReadyCondition, vmwarev1.ResourcePolicyCreationFailedReason, clusterv1.ConditionSeverityWarning, err.Error()) - v1beta2conditions.Set(clusterCtx.VSphereCluster, metav1.Condition{ + deprecatedconditions.MarkFalse(clusterCtx.VSphereCluster, vmwarev1.ResourcePolicyReadyCondition, vmwarev1.ResourcePolicyCreationFailedReason, clusterv1beta1.ConditionSeverityWarning, "%v", err) + deprecatedv1beta2conditions.Set(clusterCtx.VSphereCluster, metav1.Condition{ Type: vmwarev1.VSphereClusterResourcePolicyReadyV1Beta2Condition, Status: metav1.ConditionFalse, Reason: vmwarev1.VSphereClusterResourcePolicyNotReadyV1Beta2Reason, @@ -256,8 +256,8 @@ func (r *ClusterReconciler) reconcileNormal(ctx context.Context, clusterCtx *vmw "failed to configure resource policy for vsphereCluster %s/%s", clusterCtx.VSphereCluster.Namespace, clusterCtx.VSphereCluster.Name) } - conditions.MarkTrue(clusterCtx.VSphereCluster, vmwarev1.ResourcePolicyReadyCondition) - v1beta2conditions.Set(clusterCtx.VSphereCluster, metav1.Condition{ + deprecatedconditions.MarkTrue(clusterCtx.VSphereCluster, vmwarev1.ResourcePolicyReadyCondition) + deprecatedv1beta2conditions.Set(clusterCtx.VSphereCluster, metav1.Condition{ Type: vmwarev1.VSphereClusterResourcePolicyReadyV1Beta2Condition, Status: metav1.ConditionTrue, Reason: vmwarev1.VSphereClusterResourcePolicyReadyV1Beta2Reason, @@ -287,13 +287,13 @@ func (r *ClusterReconciler) reconcileControlPlaneEndpoint(ctx context.Context, c if !clusterCtx.Cluster.Spec.ControlPlaneEndpoint.IsZero() { clusterCtx.VSphereCluster.Spec.ControlPlaneEndpoint.Host = clusterCtx.Cluster.Spec.ControlPlaneEndpoint.Host clusterCtx.VSphereCluster.Spec.ControlPlaneEndpoint.Port = clusterCtx.Cluster.Spec.ControlPlaneEndpoint.Port - v1beta2conditions.Set(clusterCtx.VSphereCluster, metav1.Condition{ + deprecatedv1beta2conditions.Set(clusterCtx.VSphereCluster, metav1.Condition{ Type: vmwarev1.VSphereClusterLoadBalancerReadyV1Beta2Condition, Status: metav1.ConditionTrue, Reason: vmwarev1.VSphereClusterLoadBalancerReadyV1Beta2Reason, }) if r.NetworkProvider.HasLoadBalancer() { - conditions.MarkTrue(clusterCtx.VSphereCluster, vmwarev1.LoadBalancerReadyCondition) + 
deprecatedconditions.MarkTrue(clusterCtx.VSphereCluster, vmwarev1.LoadBalancerReadyCondition) } log.Info("Skipping control plane endpoint reconciliation", "reason", "ControlPlaneEndpoint already set on Cluster", @@ -302,13 +302,13 @@ func (r *ClusterReconciler) reconcileControlPlaneEndpoint(ctx context.Context, c } if !clusterCtx.VSphereCluster.Spec.ControlPlaneEndpoint.IsZero() { - v1beta2conditions.Set(clusterCtx.VSphereCluster, metav1.Condition{ + deprecatedv1beta2conditions.Set(clusterCtx.VSphereCluster, metav1.Condition{ Type: vmwarev1.VSphereClusterLoadBalancerReadyV1Beta2Condition, Status: metav1.ConditionTrue, Reason: vmwarev1.VSphereClusterLoadBalancerReadyV1Beta2Reason, }) if r.NetworkProvider.HasLoadBalancer() { - conditions.MarkTrue(clusterCtx.VSphereCluster, vmwarev1.LoadBalancerReadyCondition) + deprecatedconditions.MarkTrue(clusterCtx.VSphereCluster, vmwarev1.LoadBalancerReadyCondition) } log.Info("Skipping control plane endpoint reconciliation", "reason", "ControlPlaneEndpoint already set on VSphereCluster", @@ -370,7 +370,7 @@ func (r *ClusterReconciler) reconcileAPIEndpoints(ctx context.Context, clusterCt // Define a variable to assign the API endpoints of control plane // machines as they are discovered. - apiEndpointList := []clusterv1.APIEndpoint{} + apiEndpointList := []clusterv1beta1.APIEndpoint{} // Iterate over the cluster's control plane CAPI machines. for _, machine := range machines { @@ -401,7 +401,7 @@ func (r *ClusterReconciler) reconcileAPIEndpoints(ctx context.Context, clusterCt // Append the control plane machine's IP address to the list of API // endpoints for this cluster so that they can be read into the // analogous CAPI cluster via an unstructured reader. - apiEndpoint := clusterv1.APIEndpoint{ + apiEndpoint := clusterv1beta1.APIEndpoint{ Host: vsphereMachine.Status.IPAddr, Port: apiEndpointPort, } @@ -499,8 +499,8 @@ func (r *ClusterReconciler) ZoneToVSphereClusters(ctx context.Context, o client. // Returns the failure domain information discovered on the cluster // hosting this controller. -func (r *ClusterReconciler) getFailureDomains(ctx context.Context, namespace string) (clusterv1.FailureDomains, error) { - failureDomains := clusterv1.FailureDomains{} +func (r *ClusterReconciler) getFailureDomains(ctx context.Context, namespace string) (clusterv1beta1.FailureDomains, error) { + failureDomains := clusterv1beta1.FailureDomains{} // Determine the source of failure domain based on feature gates NamespaceScopedZones. // If NamespaceScopedZones is enabled, use Zone which is Namespace scoped,otherwise use // Availability Zone which is Cluster scoped. 
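getFailureDomains above now returns the FailureDomains map from the relocated v1beta1 core API package. A minimal sketch of that map construction, assuming a cluster-api version that ships sigs.k8s.io/cluster-api/api/core/v1beta1; the helper name below is illustrative:

package main

import (
	"fmt"

	clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
)

// buildFailureDomains mirrors the pattern used by getFailureDomains above:
// every discovered zone becomes a failure domain that is eligible for
// control-plane placement.
func buildFailureDomains(zoneNames []string) clusterv1beta1.FailureDomains {
	failureDomains := clusterv1beta1.FailureDomains{}
	for _, name := range zoneNames {
		failureDomains[name] = clusterv1beta1.FailureDomainSpec{ControlPlane: true}
	}
	return failureDomains
}

func main() {
	fmt.Println(buildFailureDomains([]string{"zone-a", "zone-b"}))
}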
@@ -516,7 +516,7 @@ func (r *ClusterReconciler) getFailureDomains(ctx context.Context, namespace str if !zone.DeletionTimestamp.IsZero() { continue } - failureDomains[zone.Name] = clusterv1.FailureDomainSpec{ControlPlane: true} + failureDomains[zone.Name] = clusterv1beta1.FailureDomainSpec{ControlPlane: true} } if len(failureDomains) == 0 { @@ -534,7 +534,7 @@ func (r *ClusterReconciler) getFailureDomains(ctx context.Context, namespace str return nil, nil } for _, az := range availabilityZoneList.Items { - failureDomains[az.Name] = clusterv1.FailureDomainSpec{ + failureDomains[az.Name] = clusterv1beta1.FailureDomainSpec{ ControlPlane: true, } } diff --git a/controllers/vmware/vspherecluster_reconciler_test.go b/controllers/vmware/vspherecluster_reconciler_test.go index 2ec59bbd97..c79d12992b 100644 --- a/controllers/vmware/vspherecluster_reconciler_test.go +++ b/controllers/vmware/vspherecluster_reconciler_test.go @@ -29,8 +29,9 @@ import ( apirecord "k8s.io/client-go/tools/record" utilfeature "k8s.io/component-base/featuregate/testing" "k8s.io/utils/ptr" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - "sigs.k8s.io/cluster-api/util/conditions" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" + deprecatedconditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" @@ -102,23 +103,23 @@ var _ = Describe("Cluster Controller Tests", func() { Context("Test reconcileDelete", func() { It("should mark specific resources to be in deleting conditions", func() { clusterCtx.VSphereCluster.Status.Conditions = append(clusterCtx.VSphereCluster.Status.Conditions, - clusterv1.Condition{Type: vmwarev1.ResourcePolicyReadyCondition, Status: corev1.ConditionTrue}) + clusterv1beta1.Condition{Type: vmwarev1.ResourcePolicyReadyCondition, Status: corev1.ConditionTrue}) reconciler.reconcileDelete(clusterCtx) - c := conditions.Get(clusterCtx.VSphereCluster, vmwarev1.ResourcePolicyReadyCondition) + c := deprecatedconditions.Get(clusterCtx.VSphereCluster, vmwarev1.ResourcePolicyReadyCondition) Expect(c).NotTo(BeNil()) Expect(c.Status).To(Equal(corev1.ConditionFalse)) - Expect(c.Reason).To(Equal(clusterv1.DeletingReason)) + Expect(c.Reason).To(Equal(clusterv1beta1.DeletingReason)) }) It("should not mark other resources to be in deleting conditions", func() { - otherReady := clusterv1.ConditionType("OtherReady") + otherReady := clusterv1beta1.ConditionType("OtherReady") clusterCtx.VSphereCluster.Status.Conditions = append(clusterCtx.VSphereCluster.Status.Conditions, - clusterv1.Condition{Type: otherReady, Status: corev1.ConditionTrue}) + clusterv1beta1.Condition{Type: otherReady, Status: corev1.ConditionTrue}) reconciler.reconcileDelete(clusterCtx) - c := conditions.Get(clusterCtx.VSphereCluster, otherReady) + c := deprecatedconditions.Get(clusterCtx.VSphereCluster, otherReady) Expect(c).NotTo(BeNil()) Expect(c.Status).NotTo(Equal(corev1.ConditionFalse)) - Expect(c.Reason).NotTo(Equal(clusterv1.DeletingReason)) + Expect(c.Reason).NotTo(Equal(clusterv1beta1.DeletingReason)) }) }) }) @@ -140,7 +141,7 @@ func TestClusterReconciler_getFailureDomains(t *testing.T) { tests := []struct { name string objects []client.Object - want clusterv1.FailureDomains + want clusterv1beta1.FailureDomains wantErr bool featureGate bool }{ @@ -258,10 +259,10 @@ func zone(namespace, name string, deleting bool) *topologyv1.Zone { return z } -func failureDomains(names 
...string) clusterv1.FailureDomains { - fds := clusterv1.FailureDomains{} +func failureDomains(names ...string) clusterv1beta1.FailureDomains { + fds := clusterv1beta1.FailureDomains{} for _, name := range names { - fds[name] = clusterv1.FailureDomainSpec{ + fds[name] = clusterv1beta1.FailureDomainSpec{ ControlPlane: true, } } diff --git a/controllers/vmware/vspheremachinetemplate_controller.go b/controllers/vmware/vspheremachinetemplate_controller.go index 0bc971b5d9..82338aa0df 100644 --- a/controllers/vmware/vspheremachinetemplate_controller.go +++ b/controllers/vmware/vspheremachinetemplate_controller.go @@ -24,7 +24,7 @@ import ( corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/resource" - "sigs.k8s.io/cluster-api/util/patch" + "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/patch" "sigs.k8s.io/cluster-api/util/predicates" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" diff --git a/controllers/vspherecluster_controller.go b/controllers/vspherecluster_controller.go index fafac909de..4abf4d878c 100644 --- a/controllers/vspherecluster_controller.go +++ b/controllers/vspherecluster_controller.go @@ -22,7 +22,7 @@ import ( "github.com/pkg/errors" "k8s.io/klog/v2" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" clusterutilv1 "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/annotations" "sigs.k8s.io/cluster-api/util/predicates" diff --git a/controllers/vspherecluster_reconciler.go b/controllers/vspherecluster_reconciler.go index 0c813aa1a9..dccf4d7ae5 100644 --- a/controllers/vspherecluster_reconciler.go +++ b/controllers/vspherecluster_reconciler.go @@ -31,13 +31,15 @@ import ( kerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/klog/v2" "k8s.io/utils/ptr" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" clusterutilv1 "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/conditions" - v1beta2conditions "sigs.k8s.io/cluster-api/util/conditions/v1beta2" + deprecatedconditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" + deprecatedv1beta2conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions/v1beta2" + "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/patch" + "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/paused" "sigs.k8s.io/cluster-api/util/finalizers" - "sigs.k8s.io/cluster-api/util/patch" - "sigs.k8s.io/cluster-api/util/paused" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" ctrlutil "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" @@ -130,28 +132,28 @@ func (r *clusterReconciler) Reconcile(ctx context.Context, req ctrl.Request) (_ // patch updates the VSphereCluster and its status on the API server. func (r *clusterReconciler) patch(ctx context.Context, clusterCtx *capvcontext.ClusterContext) error { // always update the readyCondition. 
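The import hunk above establishes the aliasing convention used throughout this diff: clusterv1beta1 points at the relocated v1beta1 core API (still needed for the provider's legacy conditions, failure domains, and API endpoints), clusterv1 points at the v1beta2 core API, and the old util/conditions, util/patch, and util/paused helpers move under util/deprecated/v1beta1. Note that vspherecluster_reconciler.go also keeps the un-renamed util/conditions import, which is used later in this file against the v1beta2 Cluster conditions. A tiny compile check of the two API aliases, assuming a cluster-api module recent enough to ship both groups:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2"
)

func main() {
	// Legacy (v1beta1) condition shape: a typed condition carrying a Severity field.
	legacy := clusterv1beta1.Condition{Type: "Ready", Status: corev1.ConditionTrue}
	// Current (v1beta2) objects such as Cluster carry metav1.Condition-style
	// conditions instead; only the type is referenced here as a compile check.
	var cluster clusterv1.Cluster
	fmt.Println(legacy.Type, cluster.GetName())
}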
- conditions.SetSummary(clusterCtx.VSphereCluster, - conditions.WithConditions( + deprecatedconditions.SetSummary(clusterCtx.VSphereCluster, + deprecatedconditions.WithConditions( infrav1.VCenterAvailableCondition, ), ) - if err := v1beta2conditions.SetSummaryCondition(clusterCtx.VSphereCluster, clusterCtx.VSphereCluster, infrav1.VSphereClusterReadyV1Beta2Condition, - v1beta2conditions.ForConditionTypes{ + if err := deprecatedv1beta2conditions.SetSummaryCondition(clusterCtx.VSphereCluster, clusterCtx.VSphereCluster, infrav1.VSphereClusterReadyV1Beta2Condition, + deprecatedv1beta2conditions.ForConditionTypes{ infrav1.VSphereClusterVCenterAvailableV1Beta2Condition, // FailureDomainsReady and ClusterModuelsReady may not be always set. infrav1.VSphereClusterFailureDomainsReadyV1Beta2Condition, infrav1.VSphereClusterClusterModulesReadyV1Beta2Condition, }, - v1beta2conditions.IgnoreTypesIfMissing{ + deprecatedv1beta2conditions.IgnoreTypesIfMissing{ infrav1.VSphereClusterFailureDomainsReadyV1Beta2Condition, infrav1.VSphereClusterClusterModulesReadyV1Beta2Condition, }, // Using a custom merge strategy to override reasons applied during merge. - v1beta2conditions.CustomMergeStrategy{ - MergeStrategy: v1beta2conditions.DefaultMergeStrategy( + deprecatedv1beta2conditions.CustomMergeStrategy{ + MergeStrategy: deprecatedv1beta2conditions.DefaultMergeStrategy( // Use custom reasons. - v1beta2conditions.ComputeReasonFunc(v1beta2conditions.GetDefaultComputeMergeReasonFunc( + deprecatedv1beta2conditions.ComputeReasonFunc(deprecatedv1beta2conditions.GetDefaultComputeMergeReasonFunc( infrav1.VSphereClusterNotReadyV1Beta2Reason, infrav1.VSphereClusterReadyUnknownV1Beta2Reason, infrav1.VSphereClusterReadyV1Beta2Reason, @@ -164,7 +166,7 @@ func (r *clusterReconciler) patch(ctx context.Context, clusterCtx *capvcontext.C return clusterCtx.PatchHelper.Patch(ctx, clusterCtx.VSphereCluster, patch.WithOwnedV1Beta2Conditions{Conditions: []string{ - clusterv1.PausedV1Beta2Condition, + clusterv1beta1.PausedV1Beta2Condition, infrav1.VSphereClusterReadyV1Beta2Condition, infrav1.VSphereClusterFailureDomainsReadyV1Beta2Condition, infrav1.VSphereClusterVCenterAvailableV1Beta2Condition, @@ -176,17 +178,17 @@ func (r *clusterReconciler) patch(ctx context.Context, clusterCtx *capvcontext.C func (r *clusterReconciler) reconcileDelete(ctx context.Context, clusterCtx *capvcontext.ClusterContext) (reconcile.Result, error) { log := ctrl.LoggerFrom(ctx) - v1beta2conditions.Set(clusterCtx.VSphereCluster, metav1.Condition{ + deprecatedv1beta2conditions.Set(clusterCtx.VSphereCluster, metav1.Condition{ Type: infrav1.VSphereClusterVCenterAvailableV1Beta2Condition, Status: metav1.ConditionFalse, Reason: infrav1.VSphereClusterVCenterAvailableDeletingV1Beta2Reason, }) - v1beta2conditions.Set(clusterCtx.VSphereCluster, metav1.Condition{ + deprecatedv1beta2conditions.Set(clusterCtx.VSphereCluster, metav1.Condition{ Type: infrav1.VSphereClusterClusterModulesReadyV1Beta2Condition, Status: metav1.ConditionFalse, Reason: infrav1.VSphereClusterClusterModulesDeletingV1Beta2Reason, }) - v1beta2conditions.Set(clusterCtx.VSphereCluster, metav1.Condition{ + deprecatedv1beta2conditions.Set(clusterCtx.VSphereCluster, metav1.Condition{ Type: infrav1.VSphereClusterFailureDomainsReadyV1Beta2Condition, Status: metav1.ConditionFalse, Reason: infrav1.VSphereClusterFailureDomainsDeletingV1Beta2Reason, @@ -257,7 +259,7 @@ func (r *clusterReconciler) reconcileNormal(ctx context.Context, clusterCtx *cap // Reconcile failure domains. 
ok, err := r.reconcileDeploymentZones(ctx, clusterCtx) if err != nil { - v1beta2conditions.Set(clusterCtx.VSphereCluster, metav1.Condition{ + deprecatedv1beta2conditions.Set(clusterCtx.VSphereCluster, metav1.Condition{ Type: infrav1.VSphereClusterFailureDomainsReadyV1Beta2Condition, Status: metav1.ConditionFalse, Reason: infrav1.VSphereClusterFailureDomainsNotReadyV1Beta2Reason, @@ -271,8 +273,8 @@ func (r *clusterReconciler) reconcileNormal(ctx context.Context, clusterCtx *cap // Reconcile vCenter availability. if err := r.reconcileIdentitySecret(ctx, clusterCtx); err != nil { - conditions.MarkFalse(clusterCtx.VSphereCluster, infrav1.VCenterAvailableCondition, infrav1.VCenterUnreachableReason, clusterv1.ConditionSeverityError, err.Error()) - v1beta2conditions.Set(clusterCtx.VSphereCluster, metav1.Condition{ + deprecatedconditions.MarkFalse(clusterCtx.VSphereCluster, infrav1.VCenterAvailableCondition, infrav1.VCenterUnreachableReason, clusterv1beta1.ConditionSeverityError, "%v", err) + deprecatedv1beta2conditions.Set(clusterCtx.VSphereCluster, metav1.Condition{ Type: infrav1.VSphereClusterVCenterAvailableV1Beta2Condition, Status: metav1.ConditionFalse, Reason: infrav1.VSphereClusterVCenterUnreachableV1Beta2Reason, @@ -283,8 +285,8 @@ func (r *clusterReconciler) reconcileNormal(ctx context.Context, clusterCtx *cap vcenterSession, err := r.reconcileVCenterConnectivity(ctx, clusterCtx) if err != nil { - conditions.MarkFalse(clusterCtx.VSphereCluster, infrav1.VCenterAvailableCondition, infrav1.VCenterUnreachableReason, clusterv1.ConditionSeverityError, err.Error()) - v1beta2conditions.Set(clusterCtx.VSphereCluster, metav1.Condition{ + deprecatedconditions.MarkFalse(clusterCtx.VSphereCluster, infrav1.VCenterAvailableCondition, infrav1.VCenterUnreachableReason, clusterv1beta1.ConditionSeverityError, "%v", err) + deprecatedv1beta2conditions.Set(clusterCtx.VSphereCluster, metav1.Condition{ Type: infrav1.VSphereClusterVCenterAvailableV1Beta2Condition, Status: metav1.ConditionFalse, Reason: infrav1.VSphereClusterVCenterUnreachableV1Beta2Reason, @@ -293,8 +295,8 @@ func (r *clusterReconciler) reconcileNormal(ctx context.Context, clusterCtx *cap return reconcile.Result{}, pkgerrors.Wrapf(err, "unexpected error while probing vcenter for %s", clusterCtx) } - conditions.MarkTrue(clusterCtx.VSphereCluster, infrav1.VCenterAvailableCondition) - v1beta2conditions.Set(clusterCtx.VSphereCluster, metav1.Condition{ + deprecatedconditions.MarkTrue(clusterCtx.VSphereCluster, infrav1.VCenterAvailableCondition) + deprecatedv1beta2conditions.Set(clusterCtx.VSphereCluster, metav1.Condition{ Type: infrav1.VSphereClusterVCenterAvailableV1Beta2Condition, Status: metav1.ConditionTrue, Reason: infrav1.VSphereClusterVCenterAvailableV1Beta2Reason, @@ -303,8 +305,8 @@ func (r *clusterReconciler) reconcileNormal(ctx context.Context, clusterCtx *cap // Reconcile cluster modules. 
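Throughout this diff the MarkFalse message arguments also change from err.Error() to a constant "%v" format string with the error passed as an argument. MarkFalse treats the message as a printf-style format, so any percent sign inside the error text would otherwise be interpreted as a verb, and go vet's printf check flags non-constant format strings. A stdlib-only illustration of the difference:

package main

import (
	"errors"
	"fmt"
)

func main() {
	err := errors.New("vcenter unreachable: 100% of probes failed")

	// Using the error text itself as the format string: the "%" inside it is
	// parsed as a formatting verb and the message gets mangled.
	fmt.Println(fmt.Sprintf(err.Error()))

	// Using a constant "%v" format string with the error as an argument keeps
	// the message intact; this is the shape the MarkFalse calls above now use.
	fmt.Println(fmt.Sprintf("%v", err))
}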
err = r.reconcileVCenterVersion(clusterCtx, vcenterSession) if err != nil || clusterCtx.VSphereCluster.Status.VCenterVersion == "" { - conditions.MarkFalse(clusterCtx.VSphereCluster, infrav1.ClusterModulesAvailableCondition, infrav1.MissingVCenterVersionReason, clusterv1.ConditionSeverityWarning, err.Error()) - v1beta2conditions.Set(clusterCtx.VSphereCluster, metav1.Condition{ + deprecatedconditions.MarkFalse(clusterCtx.VSphereCluster, infrav1.ClusterModulesAvailableCondition, infrav1.MissingVCenterVersionReason, clusterv1beta1.ConditionSeverityWarning, "%v", err) + deprecatedv1beta2conditions.Set(clusterCtx.VSphereCluster, metav1.Condition{ Type: infrav1.VSphereClusterClusterModulesReadyV1Beta2Condition, Status: metav1.ConditionFalse, Reason: infrav1.VSphereClusterModulesInvalidVCenterVersionV1Beta2Reason, @@ -315,8 +317,8 @@ func (r *clusterReconciler) reconcileNormal(ctx context.Context, clusterCtx *cap affinityReconcileResult, err := r.reconcileClusterModules(ctx, clusterCtx) if err != nil { - conditions.MarkFalse(clusterCtx.VSphereCluster, infrav1.ClusterModulesAvailableCondition, infrav1.ClusterModuleSetupFailedReason, clusterv1.ConditionSeverityWarning, err.Error()) - v1beta2conditions.Set(clusterCtx.VSphereCluster, metav1.Condition{ + deprecatedconditions.MarkFalse(clusterCtx.VSphereCluster, infrav1.ClusterModulesAvailableCondition, infrav1.ClusterModuleSetupFailedReason, clusterv1beta1.ConditionSeverityWarning, "%v", err) + deprecatedv1beta2conditions.Set(clusterCtx.VSphereCluster, metav1.Condition{ Type: infrav1.VSphereClusterClusterModulesReadyV1Beta2Condition, Status: metav1.ConditionFalse, Reason: infrav1.VSphereClusterClusterModulesNotReadyV1Beta2Reason, @@ -422,7 +424,7 @@ func (r *clusterReconciler) reconcileDeploymentZones(ctx context.Context, cluste } readyNotReported, notReady := 0, 0 - failureDomains := clusterv1.FailureDomains{} + failureDomains := clusterv1beta1.FailureDomains{} for _, zone := range deploymentZoneList.Items { if zone.Spec.Server != clusterCtx.VSphereCluster.Spec.Server { continue @@ -430,14 +432,14 @@ func (r *clusterReconciler) reconcileDeploymentZones(ctx context.Context, cluste if zone.Status.Ready == nil { readyNotReported++ - failureDomains[zone.Name] = clusterv1.FailureDomainSpec{ + failureDomains[zone.Name] = clusterv1beta1.FailureDomainSpec{ ControlPlane: ptr.Deref(zone.Spec.ControlPlane, true), } continue } if *zone.Status.Ready { - failureDomains[zone.Name] = clusterv1.FailureDomainSpec{ + failureDomains[zone.Name] = clusterv1beta1.FailureDomainSpec{ ControlPlane: ptr.Deref(zone.Spec.ControlPlane, true), } continue @@ -448,8 +450,8 @@ func (r *clusterReconciler) reconcileDeploymentZones(ctx context.Context, cluste clusterCtx.VSphereCluster.Status.FailureDomains = failureDomains if readyNotReported > 0 { log.Info("Waiting for failure domains to be reconciled") - conditions.MarkFalse(clusterCtx.VSphereCluster, infrav1.FailureDomainsAvailableCondition, infrav1.WaitingForFailureDomainStatusReason, clusterv1.ConditionSeverityInfo, "waiting for failure domains to report ready status") - v1beta2conditions.Set(clusterCtx.VSphereCluster, metav1.Condition{ + deprecatedconditions.MarkFalse(clusterCtx.VSphereCluster, infrav1.FailureDomainsAvailableCondition, infrav1.WaitingForFailureDomainStatusReason, clusterv1beta1.ConditionSeverityInfo, "waiting for failure domains to report ready status") + deprecatedv1beta2conditions.Set(clusterCtx.VSphereCluster, metav1.Condition{ Type: infrav1.VSphereClusterFailureDomainsReadyV1Beta2Condition, Status: 
metav1.ConditionFalse, Reason: infrav1.VSphereClusterFailureDomainsNotReadyV1Beta2Reason, @@ -460,16 +462,16 @@ func (r *clusterReconciler) reconcileDeploymentZones(ctx context.Context, cluste if len(failureDomains) > 0 { if notReady > 0 { - conditions.MarkFalse(clusterCtx.VSphereCluster, infrav1.FailureDomainsAvailableCondition, infrav1.FailureDomainsSkippedReason, clusterv1.ConditionSeverityInfo, "one or more failure domains are not ready") - v1beta2conditions.Set(clusterCtx.VSphereCluster, metav1.Condition{ + deprecatedconditions.MarkFalse(clusterCtx.VSphereCluster, infrav1.FailureDomainsAvailableCondition, infrav1.FailureDomainsSkippedReason, clusterv1beta1.ConditionSeverityInfo, "one or more failure domains are not ready") + deprecatedv1beta2conditions.Set(clusterCtx.VSphereCluster, metav1.Condition{ Type: infrav1.VSphereClusterFailureDomainsReadyV1Beta2Condition, Status: metav1.ConditionFalse, Reason: infrav1.VSphereClusterFailureDomainsNotReadyV1Beta2Reason, Message: "One or more failure domains are not ready", }) } else { - conditions.MarkTrue(clusterCtx.VSphereCluster, infrav1.FailureDomainsAvailableCondition) - v1beta2conditions.Set(clusterCtx.VSphereCluster, metav1.Condition{ + deprecatedconditions.MarkTrue(clusterCtx.VSphereCluster, infrav1.FailureDomainsAvailableCondition) + deprecatedv1beta2conditions.Set(clusterCtx.VSphereCluster, metav1.Condition{ Type: infrav1.VSphereClusterFailureDomainsReadyV1Beta2Condition, Status: metav1.ConditionTrue, Reason: infrav1.VSphereClusterFailureDomainsReadyV1Beta2Reason, @@ -477,7 +479,7 @@ func (r *clusterReconciler) reconcileDeploymentZones(ctx context.Context, cluste } } else { // Remove the condition if failure domains do not exist - conditions.Delete(clusterCtx.VSphereCluster, infrav1.FailureDomainsAvailableCondition) + deprecatedconditions.Delete(clusterCtx.VSphereCluster, infrav1.FailureDomainsAvailableCondition) } return true, nil } @@ -535,7 +537,7 @@ func (r *clusterReconciler) controlPlaneMachineToCluster(ctx context.Context, o } ctx = ctrl.LoggerInto(ctx, log) - if conditions.IsTrue(cluster, clusterv1.ControlPlaneInitializedCondition) { + if conditions.IsTrue(cluster, clusterv1.ClusterControlPlaneInitializedCondition) { log.V(6).Info("Skipping VSphereCluster reconcile as control plane is already initialized") return nil } diff --git a/controllers/vspherecluster_reconciler_test.go b/controllers/vspherecluster_reconciler_test.go index 56334119bd..e03b64610f 100644 --- a/controllers/vspherecluster_reconciler_test.go +++ b/controllers/vspherecluster_reconciler_test.go @@ -29,9 +29,10 @@ import ( apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/utils/ptr" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - "sigs.k8s.io/cluster-api/util/conditions" - "sigs.k8s.io/cluster-api/util/patch" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" + deprecatedconditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" + "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/patch" "sigs.k8s.io/controller-runtime/pkg/client" ctrlutil "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" @@ -160,7 +161,7 @@ var _ = Describe("VIM based VSphere ClusterReconciler", func() { if err := testEnv.Get(ctx, key, instance); err != nil { return false } - return conditions.IsTrue(instance, infrav1.VCenterAvailableCondition) + return deprecatedconditions.IsTrue(instance, infrav1.VCenterAvailableCondition) }, timeout).Should(BeTrue()) 
}) @@ -233,15 +234,15 @@ var _ = Describe("VIM based VSphere ClusterReconciler", func() { return false } - actual := conditions.Get(instance, infrav1.VCenterAvailableCondition) + actual := deprecatedconditions.Get(instance, infrav1.VCenterAvailableCondition) if actual == nil { return false } actual.Message = "" - return Expect(actual).Should(conditions.HaveSameStateOf(&clusterv1.Condition{ + return Expect(actual).Should(deprecatedconditions.HaveSameStateOf(&clusterv1beta1.Condition{ Type: infrav1.VCenterAvailableCondition, Status: corev1.ConditionFalse, - Severity: clusterv1.ConditionSeverityError, + Severity: clusterv1beta1.ConditionSeverityError, Reason: infrav1.VCenterUnreachableReason, })) }, timeout).Should(BeTrue()) @@ -421,9 +422,9 @@ var _ = Describe("VIM based VSphere ClusterReconciler", func() { if err := testEnv.Get(ctx, key, instance); err != nil { return false } - return conditions.Has(instance, infrav1.FailureDomainsAvailableCondition) && - conditions.IsFalse(instance, infrav1.FailureDomainsAvailableCondition) && - conditions.Get(instance, infrav1.FailureDomainsAvailableCondition).Reason == infrav1.WaitingForFailureDomainStatusReason + return deprecatedconditions.Has(instance, infrav1.FailureDomainsAvailableCondition) && + deprecatedconditions.IsFalse(instance, infrav1.FailureDomainsAvailableCondition) && + deprecatedconditions.Get(instance, infrav1.FailureDomainsAvailableCondition).Reason == infrav1.WaitingForFailureDomainStatusReason }, timeout).Should(BeTrue()) By("Setting the status of the Deployment Zone to true") @@ -438,8 +439,8 @@ var _ = Describe("VIM based VSphere ClusterReconciler", func() { if err := testEnv.Get(ctx, key, instance); err != nil { return false } - return conditions.Has(instance, infrav1.FailureDomainsAvailableCondition) && - conditions.IsTrue(instance, infrav1.FailureDomainsAvailableCondition) + return deprecatedconditions.Has(instance, infrav1.FailureDomainsAvailableCondition) && + deprecatedconditions.IsTrue(instance, infrav1.FailureDomainsAvailableCondition) }, timeout).Should(BeTrue()) }) @@ -460,8 +461,8 @@ var _ = Describe("VIM based VSphere ClusterReconciler", func() { if err := testEnv.Get(ctx, key, instance); err != nil { return false } - return conditions.Has(instance, infrav1.FailureDomainsAvailableCondition) && - conditions.IsTrue(instance, infrav1.FailureDomainsAvailableCondition) + return deprecatedconditions.Has(instance, infrav1.FailureDomainsAvailableCondition) && + deprecatedconditions.IsTrue(instance, infrav1.FailureDomainsAvailableCondition) }, timeout).Should(BeTrue()) By("Deleting the Deployment Zone", func() { @@ -472,7 +473,7 @@ var _ = Describe("VIM based VSphere ClusterReconciler", func() { if err := testEnv.Get(ctx, key, instance); err != nil { return false } - return conditions.Has(instance, infrav1.FailureDomainsAvailableCondition) + return deprecatedconditions.Has(instance, infrav1.FailureDomainsAvailableCondition) }, timeout).Should(BeFalse()) }) }) @@ -494,7 +495,7 @@ func TestClusterReconciler_ReconcileDeploymentZones(t *testing.T) { name: "with no deployment zones", reconciled: true, assert: func(vsphereCluster *infrav1.VSphereCluster) { - g.Expect(conditions.Has(vsphereCluster, infrav1.FailureDomainsAvailableCondition)).To(BeFalse()) + g.Expect(deprecatedconditions.Has(vsphereCluster, infrav1.FailureDomainsAvailableCondition)).To(BeFalse()) }, }, { @@ -505,7 +506,7 @@ func TestClusterReconciler_ReconcileDeploymentZones(t *testing.T) { deploymentZone(server, "zone-2", ptr.To(true), ptr.To(true)), }, assert: 
func(vsphereCluster *infrav1.VSphereCluster) { - g.Expect(conditions.Has(vsphereCluster, infrav1.FailureDomainsAvailableCondition)).To(BeFalse()) + g.Expect(deprecatedconditions.Has(vsphereCluster, infrav1.FailureDomainsAvailableCondition)).To(BeFalse()) }, }, } @@ -543,7 +544,7 @@ func TestClusterReconciler_ReconcileDeploymentZones(t *testing.T) { name: "with no deployment zones", reconciled: true, assert: func(vsphereCluster *infrav1.VSphereCluster) { - g.Expect(conditions.Has(vsphereCluster, infrav1.FailureDomainsAvailableCondition)).To(BeFalse()) + g.Expect(deprecatedconditions.Has(vsphereCluster, infrav1.FailureDomainsAvailableCondition)).To(BeFalse()) }, }, { @@ -553,8 +554,8 @@ func TestClusterReconciler_ReconcileDeploymentZones(t *testing.T) { deploymentZone(server, "zone-2", ptr.To(true), ptr.To(false)), }, assert: func(vsphereCluster *infrav1.VSphereCluster) { - g.Expect(conditions.IsFalse(vsphereCluster, infrav1.FailureDomainsAvailableCondition)).To(BeTrue()) - g.Expect(conditions.Get(vsphereCluster, infrav1.FailureDomainsAvailableCondition).Reason).To(Equal(infrav1.WaitingForFailureDomainStatusReason)) + g.Expect(deprecatedconditions.IsFalse(vsphereCluster, infrav1.FailureDomainsAvailableCondition)).To(BeTrue()) + g.Expect(deprecatedconditions.Get(vsphereCluster, infrav1.FailureDomainsAvailableCondition).Reason).To(Equal(infrav1.WaitingForFailureDomainStatusReason)) }, }, { @@ -565,8 +566,8 @@ func TestClusterReconciler_ReconcileDeploymentZones(t *testing.T) { deploymentZone(server, "zone-2", ptr.To(true), ptr.To(true)), }, assert: func(vsphereCluster *infrav1.VSphereCluster) { - g.Expect(conditions.IsFalse(vsphereCluster, infrav1.FailureDomainsAvailableCondition)).To(BeTrue()) - g.Expect(conditions.Get(vsphereCluster, infrav1.FailureDomainsAvailableCondition).Reason).To(Equal(infrav1.FailureDomainsSkippedReason)) + g.Expect(deprecatedconditions.IsFalse(vsphereCluster, infrav1.FailureDomainsAvailableCondition)).To(BeTrue()) + g.Expect(deprecatedconditions.Get(vsphereCluster, infrav1.FailureDomainsAvailableCondition).Reason).To(Equal(infrav1.FailureDomainsSkippedReason)) }, }, { @@ -577,7 +578,7 @@ func TestClusterReconciler_ReconcileDeploymentZones(t *testing.T) { deploymentZone(server, "zone-2", ptr.To(true), ptr.To(true)), }, assert: func(vsphereCluster *infrav1.VSphereCluster) { - g.Expect(conditions.IsTrue(vsphereCluster, infrav1.FailureDomainsAvailableCondition)).To(BeTrue()) + g.Expect(deprecatedconditions.IsTrue(vsphereCluster, infrav1.FailureDomainsAvailableCondition)).To(BeTrue()) }, }, } diff --git a/controllers/vsphereclusteridentity_controller.go b/controllers/vsphereclusteridentity_controller.go index 99faa68206..92cb2d5f80 100644 --- a/controllers/vsphereclusteridentity_controller.go +++ b/controllers/vsphereclusteridentity_controller.go @@ -27,13 +27,13 @@ import ( kerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/client-go/tools/record" "k8s.io/klog/v2" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" clusterutilv1 "sigs.k8s.io/cluster-api/util" - "sigs.k8s.io/cluster-api/util/conditions" - v1beta2conditions "sigs.k8s.io/cluster-api/util/conditions/v1beta2" + deprecatedconditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" + deprecatedv1beta2conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions/v1beta2" + "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/patch" + "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/paused" "sigs.k8s.io/cluster-api/util/finalizers" - 
"sigs.k8s.io/cluster-api/util/patch" - "sigs.k8s.io/cluster-api/util/paused" "sigs.k8s.io/cluster-api/util/predicates" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" @@ -99,10 +99,10 @@ func (r clusterIdentityReconciler) Reconcile(ctx context.Context, req reconcile. } defer func() { - conditions.SetSummary(identity, conditions.WithConditions(infrav1.CredentialsAvailableCondidtion)) + deprecatedconditions.SetSummary(identity, deprecatedconditions.WithConditions(infrav1.CredentialsAvailableCondidtion)) if err := patchHelper.Patch(ctx, identity, patch.WithOwnedV1Beta2Conditions{Conditions: []string{ - clusterv1.PausedV1Beta2Condition, + clusterv1beta1.PausedV1Beta2Condition, infrav1.VSphereClusterIdentityAvailableV1Beta2Condition, }}); err != nil { reterr = kerrors.NewAggregate([]error{reterr, err}) @@ -120,8 +120,8 @@ func (r clusterIdentityReconciler) Reconcile(ctx context.Context, req reconcile. Name: identity.Spec.SecretName, } if err := r.Client.Get(ctx, secretKey, secret); err != nil { - conditions.MarkFalse(identity, infrav1.CredentialsAvailableCondidtion, infrav1.SecretNotAvailableReason, clusterv1.ConditionSeverityWarning, err.Error()) - v1beta2conditions.Set(identity, metav1.Condition{ + deprecatedconditions.MarkFalse(identity, infrav1.CredentialsAvailableCondidtion, infrav1.SecretNotAvailableReason, clusterv1beta1.ConditionSeverityWarning, "%v", err) + deprecatedv1beta2conditions.Set(identity, metav1.Condition{ Type: infrav1.VSphereClusterIdentityAvailableV1Beta2Condition, Status: metav1.ConditionFalse, Reason: infrav1.VSphereClusterIdentitySecretNotAvailableV1Beta2Reason, @@ -132,8 +132,8 @@ func (r clusterIdentityReconciler) Reconcile(ctx context.Context, req reconcile. // If this secret is owned by a different VSphereClusterIdentity or a VSphereCluster, mark the identity as not ready and return an error. if !clusterutilv1.IsOwnedByObject(secret, identity) && pkgidentity.IsOwnedByIdentityOrCluster(secret.GetOwnerReferences()) { - conditions.MarkFalse(identity, infrav1.CredentialsAvailableCondidtion, infrav1.SecretAlreadyInUseReason, clusterv1.ConditionSeverityError, "secret being used by another Cluster/VSphereIdentity") - v1beta2conditions.Set(identity, metav1.Condition{ + deprecatedconditions.MarkFalse(identity, infrav1.CredentialsAvailableCondidtion, infrav1.SecretAlreadyInUseReason, clusterv1beta1.ConditionSeverityError, "secret being used by another Cluster/VSphereIdentity") + deprecatedv1beta2conditions.Set(identity, metav1.Condition{ Type: infrav1.VSphereClusterIdentityAvailableV1Beta2Condition, Status: metav1.ConditionFalse, Reason: infrav1.VSphereClusterIdentitySecretAlreadyInUseV1Beta2Reason, @@ -158,8 +158,8 @@ func (r clusterIdentityReconciler) Reconcile(ctx context.Context, req reconcile. 
} err = r.Client.Update(ctx, secret) if err != nil { - conditions.MarkFalse(identity, infrav1.CredentialsAvailableCondidtion, infrav1.SecretOwnerReferenceFailedReason, clusterv1.ConditionSeverityWarning, err.Error()) - v1beta2conditions.Set(identity, metav1.Condition{ + deprecatedconditions.MarkFalse(identity, infrav1.CredentialsAvailableCondidtion, infrav1.SecretOwnerReferenceFailedReason, clusterv1beta1.ConditionSeverityWarning, "%v", err) + deprecatedv1beta2conditions.Set(identity, metav1.Condition{ Type: infrav1.VSphereClusterIdentityAvailableV1Beta2Condition, Status: metav1.ConditionFalse, Reason: infrav1.VSphereClusterIdentitySettingSecretOwnerReferenceFailedV1Beta2Reason, @@ -168,8 +168,8 @@ func (r clusterIdentityReconciler) Reconcile(ctx context.Context, req reconcile. return reconcile.Result{}, err } - conditions.MarkTrue(identity, infrav1.CredentialsAvailableCondidtion) - v1beta2conditions.Set(identity, metav1.Condition{ + deprecatedconditions.MarkTrue(identity, infrav1.CredentialsAvailableCondidtion) + deprecatedv1beta2conditions.Set(identity, metav1.Condition{ Type: infrav1.VSphereClusterIdentityAvailableV1Beta2Condition, Status: metav1.ConditionTrue, Reason: infrav1.VSphereClusterIdentityAvailableV1Beta2Reason, @@ -187,7 +187,7 @@ func (r clusterIdentityReconciler) reconcileDelete(ctx context.Context, identity Name: identity.Spec.SecretName, } - v1beta2conditions.Set(identity, metav1.Condition{ + deprecatedv1beta2conditions.Set(identity, metav1.Condition{ Type: infrav1.VSphereClusterIdentityAvailableV1Beta2Condition, Status: metav1.ConditionFalse, Reason: infrav1.VSphereClusterIdentityDeletingV1Beta2Reason, diff --git a/controllers/vsphereclusteridentity_controller_test.go b/controllers/vsphereclusteridentity_controller_test.go index 77dcb9b054..f3747866b8 100644 --- a/controllers/vsphereclusteridentity_controller_test.go +++ b/controllers/vsphereclusteridentity_controller_test.go @@ -22,7 +22,7 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" clusterutilv1 "sigs.k8s.io/cluster-api/util" - "sigs.k8s.io/cluster-api/util/conditions" + deprecatedconditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" "sigs.k8s.io/controller-runtime/pkg/client" infrav1 "sigs.k8s.io/cluster-api-provider-vsphere/apis/v1beta1" @@ -157,7 +157,7 @@ var _ = Describe("VSphereClusterIdentity Reconciler", func() { return false } - if !i.Status.Ready && conditions.GetReason(i, infrav1.CredentialsAvailableCondidtion) == infrav1.SecretAlreadyInUseReason { + if !i.Status.Ready && deprecatedconditions.GetReason(i, infrav1.CredentialsAvailableCondidtion) == infrav1.SecretAlreadyInUseReason { return true } return false @@ -181,7 +181,7 @@ var _ = Describe("VSphereClusterIdentity Reconciler", func() { return false } - if !i.Status.Ready && conditions.GetReason(i, infrav1.CredentialsAvailableCondidtion) == infrav1.SecretNotAvailableReason { + if !i.Status.Ready && deprecatedconditions.GetReason(i, infrav1.CredentialsAvailableCondidtion) == infrav1.SecretNotAvailableReason { return true } return false diff --git a/controllers/vspheredeploymentzone_controller.go b/controllers/vspheredeploymentzone_controller.go index a1aef6cfe6..7e2e88b947 100644 --- a/controllers/vspheredeploymentzone_controller.go +++ b/controllers/vspheredeploymentzone_controller.go @@ -27,14 +27,15 @@ import ( kerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/klog/v2" "k8s.io/utils/ptr" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1beta1 
"sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" clusterutilv1 "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/collections" - "sigs.k8s.io/cluster-api/util/conditions" - v1beta2conditions "sigs.k8s.io/cluster-api/util/conditions/v1beta2" + deprecatedconditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" + deprecatedv1beta2conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions/v1beta2" + "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/patch" + "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/paused" "sigs.k8s.io/cluster-api/util/finalizers" - "sigs.k8s.io/cluster-api/util/patch" - "sigs.k8s.io/cluster-api/util/paused" "sigs.k8s.io/cluster-api/util/predicates" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" @@ -137,25 +138,25 @@ func (r vsphereDeploymentZoneReconciler) Reconcile(ctx context.Context, request // Patch patches the VSphereDeploymentZone. func (r vsphereDeploymentZoneReconciler) patch(ctx context.Context, vsphereDeploymentZoneContext *capvcontext.VSphereDeploymentZoneContext) error { - conditions.SetSummary(vsphereDeploymentZoneContext.VSphereDeploymentZone, - conditions.WithConditions( + deprecatedconditions.SetSummary(vsphereDeploymentZoneContext.VSphereDeploymentZone, + deprecatedconditions.WithConditions( infrav1.VCenterAvailableCondition, infrav1.VSphereFailureDomainValidatedCondition, infrav1.PlacementConstraintMetCondition, ), ) - if err := v1beta2conditions.SetSummaryCondition(vsphereDeploymentZoneContext.VSphereDeploymentZone, vsphereDeploymentZoneContext.VSphereDeploymentZone, infrav1.VSphereDeploymentZoneReadyV1Beta2Condition, - v1beta2conditions.ForConditionTypes{ + if err := deprecatedv1beta2conditions.SetSummaryCondition(vsphereDeploymentZoneContext.VSphereDeploymentZone, vsphereDeploymentZoneContext.VSphereDeploymentZone, infrav1.VSphereDeploymentZoneReadyV1Beta2Condition, + deprecatedv1beta2conditions.ForConditionTypes{ infrav1.VSphereDeploymentZonePlacementConstraintReadyV1Beta2Condition, infrav1.VSphereDeploymentZoneVCenterAvailableV1Beta2Condition, infrav1.VSphereDeploymentZoneFailureDomainValidatedV1Beta2Condition, }, // Using a custom merge strategy to override reasons applied during merge. - v1beta2conditions.CustomMergeStrategy{ - MergeStrategy: v1beta2conditions.DefaultMergeStrategy( + deprecatedv1beta2conditions.CustomMergeStrategy{ + MergeStrategy: deprecatedv1beta2conditions.DefaultMergeStrategy( // Use custom reasons. 
- v1beta2conditions.ComputeReasonFunc(v1beta2conditions.GetDefaultComputeMergeReasonFunc( + deprecatedv1beta2conditions.ComputeReasonFunc(deprecatedv1beta2conditions.GetDefaultComputeMergeReasonFunc( infrav1.VSphereDeploymentZoneNotReadyV1Beta2Reason, infrav1.VSphereDeploymentZoneReadyUnknownV1Beta2Reason, infrav1.VSphereDeploymentZoneReadyV1Beta2Reason, @@ -168,7 +169,7 @@ func (r vsphereDeploymentZoneReconciler) patch(ctx context.Context, vsphereDeplo return vsphereDeploymentZoneContext.PatchHelper.Patch(ctx, vsphereDeploymentZoneContext.VSphereDeploymentZone, patch.WithOwnedV1Beta2Conditions{Conditions: []string{ - clusterv1.PausedV1Beta2Condition, + clusterv1beta1.PausedV1Beta2Condition, infrav1.VSphereDeploymentZoneReadyV1Beta2Condition, infrav1.VSphereDeploymentZonePlacementConstraintReadyV1Beta2Condition, infrav1.VSphereDeploymentZoneVCenterAvailableV1Beta2Condition, @@ -186,8 +187,8 @@ func (r vsphereDeploymentZoneReconciler) reconcileNormal(ctx context.Context, de authSession, err := r.getVCenterSession(ctx, deploymentZoneCtx, failureDomain.Spec.Topology.Datacenter) if err != nil { - conditions.MarkFalse(deploymentZoneCtx.VSphereDeploymentZone, infrav1.VCenterAvailableCondition, infrav1.VCenterUnreachableReason, clusterv1.ConditionSeverityError, err.Error()) - v1beta2conditions.Set(deploymentZoneCtx.VSphereDeploymentZone, metav1.Condition{ + deprecatedconditions.MarkFalse(deploymentZoneCtx.VSphereDeploymentZone, infrav1.VCenterAvailableCondition, infrav1.VCenterUnreachableReason, clusterv1beta1.ConditionSeverityError, "%v", err) + deprecatedv1beta2conditions.Set(deploymentZoneCtx.VSphereDeploymentZone, metav1.Condition{ Type: infrav1.VSphereDeploymentZoneVCenterAvailableV1Beta2Condition, Status: metav1.ConditionFalse, Reason: infrav1.VSphereDeploymentZoneVCenterUnreachableV1Beta2Reason, @@ -198,8 +199,8 @@ func (r vsphereDeploymentZoneReconciler) reconcileNormal(ctx context.Context, de } deploymentZoneCtx.AuthSession = authSession - conditions.MarkTrue(deploymentZoneCtx.VSphereDeploymentZone, infrav1.VCenterAvailableCondition) - v1beta2conditions.Set(deploymentZoneCtx.VSphereDeploymentZone, metav1.Condition{ + deprecatedconditions.MarkTrue(deploymentZoneCtx.VSphereDeploymentZone, infrav1.VCenterAvailableCondition) + deprecatedv1beta2conditions.Set(deploymentZoneCtx.VSphereDeploymentZone, metav1.Condition{ Type: infrav1.VSphereDeploymentZoneVCenterAvailableV1Beta2Condition, Status: metav1.ConditionTrue, Reason: infrav1.VSphereDeploymentZoneVCenterAvailableV1Beta2Reason, @@ -226,8 +227,8 @@ func (r vsphereDeploymentZoneReconciler) reconcilePlacementConstraint(ctx contex if resourcePool := placementConstraint.ResourcePool; resourcePool != "" { if _, err := deploymentZoneCtx.AuthSession.Finder.ResourcePool(ctx, resourcePool); err != nil { - conditions.MarkFalse(deploymentZoneCtx.VSphereDeploymentZone, infrav1.PlacementConstraintMetCondition, infrav1.ResourcePoolNotFoundReason, clusterv1.ConditionSeverityError, "resource pool %s is misconfigured", resourcePool) - v1beta2conditions.Set(deploymentZoneCtx.VSphereDeploymentZone, metav1.Condition{ + deprecatedconditions.MarkFalse(deploymentZoneCtx.VSphereDeploymentZone, infrav1.PlacementConstraintMetCondition, infrav1.ResourcePoolNotFoundReason, clusterv1beta1.ConditionSeverityError, "resource pool %s is misconfigured", resourcePool) + deprecatedv1beta2conditions.Set(deploymentZoneCtx.VSphereDeploymentZone, metav1.Condition{ Type: infrav1.VSphereDeploymentZonePlacementConstraintReadyV1Beta2Condition, Status: metav1.ConditionFalse, Reason: 
infrav1.VSphereDeploymentZonePlacementConstraintResourcePoolNotFoundV1Beta2Reason, @@ -239,8 +240,8 @@ func (r vsphereDeploymentZoneReconciler) reconcilePlacementConstraint(ctx contex if folder := placementConstraint.Folder; folder != "" { if _, err := deploymentZoneCtx.AuthSession.Finder.Folder(ctx, placementConstraint.Folder); err != nil { - conditions.MarkFalse(deploymentZoneCtx.VSphereDeploymentZone, infrav1.PlacementConstraintMetCondition, infrav1.FolderNotFoundReason, clusterv1.ConditionSeverityError, "folder %s is misconfigured", folder) - v1beta2conditions.Set(deploymentZoneCtx.VSphereDeploymentZone, metav1.Condition{ + deprecatedconditions.MarkFalse(deploymentZoneCtx.VSphereDeploymentZone, infrav1.PlacementConstraintMetCondition, infrav1.FolderNotFoundReason, clusterv1beta1.ConditionSeverityError, "folder %s is misconfigured", folder) + deprecatedv1beta2conditions.Set(deploymentZoneCtx.VSphereDeploymentZone, metav1.Condition{ Type: infrav1.VSphereDeploymentZonePlacementConstraintReadyV1Beta2Condition, Status: metav1.ConditionFalse, Reason: infrav1.VSphereDeploymentZonePlacementConstraintFolderNotFoundV1Beta2Reason, @@ -250,8 +251,8 @@ func (r vsphereDeploymentZoneReconciler) reconcilePlacementConstraint(ctx contex } } - conditions.MarkTrue(deploymentZoneCtx.VSphereDeploymentZone, infrav1.PlacementConstraintMetCondition) - v1beta2conditions.Set(deploymentZoneCtx.VSphereDeploymentZone, metav1.Condition{ + deprecatedconditions.MarkTrue(deploymentZoneCtx.VSphereDeploymentZone, infrav1.PlacementConstraintMetCondition) + deprecatedv1beta2conditions.Set(deploymentZoneCtx.VSphereDeploymentZone, metav1.Condition{ Type: infrav1.VSphereDeploymentZonePlacementConstraintReadyV1Beta2Condition, Status: metav1.ConditionTrue, Reason: infrav1.VSphereDeploymentZonePlacementConstraintReadyV1Beta2Reason, @@ -302,17 +303,17 @@ func (r vsphereDeploymentZoneReconciler) getVCenterSession(ctx context.Context, func (r vsphereDeploymentZoneReconciler) reconcileDelete(ctx context.Context, deploymentZoneCtx *capvcontext.VSphereDeploymentZoneContext) error { log := ctrl.LoggerFrom(ctx) - v1beta2conditions.Set(deploymentZoneCtx.VSphereDeploymentZone, metav1.Condition{ + deprecatedv1beta2conditions.Set(deploymentZoneCtx.VSphereDeploymentZone, metav1.Condition{ Type: infrav1.VSphereDeploymentZoneVCenterAvailableV1Beta2Condition, Status: metav1.ConditionFalse, Reason: infrav1.VSphereDeploymentZoneVCenterAvailableDeletingV1Beta2Reason, }) - v1beta2conditions.Set(deploymentZoneCtx.VSphereDeploymentZone, metav1.Condition{ + deprecatedv1beta2conditions.Set(deploymentZoneCtx.VSphereDeploymentZone, metav1.Condition{ Type: infrav1.VSphereDeploymentZonePlacementConstraintReadyV1Beta2Condition, Status: metav1.ConditionFalse, Reason: infrav1.VSphereDeploymentZonePlacementConstraintDeletingV1Beta2Reason, }) - v1beta2conditions.Set(deploymentZoneCtx.VSphereDeploymentZone, metav1.Condition{ + deprecatedv1beta2conditions.Set(deploymentZoneCtx.VSphereDeploymentZone, metav1.Condition{ Type: infrav1.VSphereDeploymentZoneFailureDomainValidatedV1Beta2Condition, Status: metav1.ConditionFalse, Reason: infrav1.VSphereDeploymentZoneFailureDomainDeletingV1Beta2Reason, diff --git a/controllers/vspheredeploymentzone_controller_domain.go b/controllers/vspheredeploymentzone_controller_domain.go index 5dc516d198..793ec8bccf 100644 --- a/controllers/vspheredeploymentzone_controller_domain.go +++ b/controllers/vspheredeploymentzone_controller_domain.go @@ -23,10 +23,10 @@ import ( "github.com/pkg/errors" metav1 
"k8s.io/apimachinery/pkg/apis/meta/v1" kerrors "k8s.io/apimachinery/pkg/util/errors" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" clusterutilv1 "sigs.k8s.io/cluster-api/util" - "sigs.k8s.io/cluster-api/util/conditions" - v1beta2conditions "sigs.k8s.io/cluster-api/util/conditions/v1beta2" + deprecatedconditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" + deprecatedv1beta2conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions/v1beta2" ctrl "sigs.k8s.io/controller-runtime" infrav1 "sigs.k8s.io/cluster-api-provider-vsphere/apis/v1beta1" @@ -39,8 +39,8 @@ import ( func (r vsphereDeploymentZoneReconciler) reconcileFailureDomain(ctx context.Context, deploymentZoneCtx *capvcontext.VSphereDeploymentZoneContext, vsphereFailureDomain *infrav1.VSphereFailureDomain) error { // verify the failure domain for the region if err := r.reconcileInfraFailureDomain(ctx, deploymentZoneCtx, vsphereFailureDomain, vsphereFailureDomain.Spec.Region); err != nil { - conditions.MarkFalse(deploymentZoneCtx.VSphereDeploymentZone, infrav1.VSphereFailureDomainValidatedCondition, infrav1.RegionMisconfiguredReason, clusterv1.ConditionSeverityError, err.Error()) - v1beta2conditions.Set(deploymentZoneCtx.VSphereDeploymentZone, metav1.Condition{ + deprecatedconditions.MarkFalse(deploymentZoneCtx.VSphereDeploymentZone, infrav1.VSphereFailureDomainValidatedCondition, infrav1.RegionMisconfiguredReason, clusterv1beta1.ConditionSeverityError, "%v", err) + deprecatedv1beta2conditions.Set(deploymentZoneCtx.VSphereDeploymentZone, metav1.Condition{ Type: infrav1.VSphereDeploymentZoneFailureDomainValidatedV1Beta2Condition, Status: metav1.ConditionFalse, Reason: infrav1.VSphereDeploymentZoneFailureDomainRegionMisconfiguredV1Beta2Reason, @@ -51,8 +51,8 @@ func (r vsphereDeploymentZoneReconciler) reconcileFailureDomain(ctx context.Cont // verify the failure domain for the zone if err := r.reconcileInfraFailureDomain(ctx, deploymentZoneCtx, vsphereFailureDomain, vsphereFailureDomain.Spec.Zone); err != nil { - conditions.MarkFalse(deploymentZoneCtx.VSphereDeploymentZone, infrav1.VSphereFailureDomainValidatedCondition, infrav1.ZoneMisconfiguredReason, clusterv1.ConditionSeverityError, err.Error()) - v1beta2conditions.Set(deploymentZoneCtx.VSphereDeploymentZone, metav1.Condition{ + deprecatedconditions.MarkFalse(deploymentZoneCtx.VSphereDeploymentZone, infrav1.VSphereFailureDomainValidatedCondition, infrav1.ZoneMisconfiguredReason, clusterv1beta1.ConditionSeverityError, "%v", err) + deprecatedv1beta2conditions.Set(deploymentZoneCtx.VSphereDeploymentZone, metav1.Condition{ Type: infrav1.VSphereDeploymentZoneFailureDomainValidatedV1Beta2Condition, Status: metav1.ConditionFalse, Reason: infrav1.VSphereDeploymentZoneFailureDomainZoneMisconfiguredV1Beta2Reason, @@ -83,7 +83,7 @@ func (r vsphereDeploymentZoneReconciler) reconcileFailureDomain(ctx context.Cont UID: deploymentZoneCtx.VSphereDeploymentZone.UID, }) }); err != nil { - v1beta2conditions.Set(deploymentZoneCtx.VSphereDeploymentZone, metav1.Condition{ + deprecatedv1beta2conditions.Set(deploymentZoneCtx.VSphereDeploymentZone, metav1.Condition{ Type: infrav1.VSphereDeploymentZoneFailureDomainValidatedV1Beta2Condition, Status: metav1.ConditionFalse, Reason: infrav1.VSphereDeploymentZoneFailureDomainValidationFailedV1Beta2Reason, @@ -93,8 +93,8 @@ func (r vsphereDeploymentZoneReconciler) reconcileFailureDomain(ctx context.Cont } // Mark the VSphereDeploymentZone as having a valid 
VSphereFailureDomain. - conditions.MarkTrue(deploymentZoneCtx.VSphereDeploymentZone, infrav1.VSphereFailureDomainValidatedCondition) - v1beta2conditions.Set(deploymentZoneCtx.VSphereDeploymentZone, metav1.Condition{ + deprecatedconditions.MarkTrue(deploymentZoneCtx.VSphereDeploymentZone, infrav1.VSphereFailureDomainValidatedCondition) + deprecatedv1beta2conditions.Set(deploymentZoneCtx.VSphereDeploymentZone, metav1.Condition{ Type: infrav1.VSphereDeploymentZoneFailureDomainValidatedV1Beta2Condition, Status: metav1.ConditionTrue, Reason: infrav1.VSphereDeploymentZoneFailureDomainValidatedV1Beta2Reason, @@ -113,8 +113,8 @@ func (r vsphereDeploymentZoneReconciler) reconcileTopology(ctx context.Context, topology := vsphereFailureDomain.Spec.Topology if datastore := topology.Datastore; datastore != "" { if _, err := deploymentZoneCtx.AuthSession.Finder.Datastore(ctx, datastore); err != nil { - conditions.MarkFalse(deploymentZoneCtx.VSphereDeploymentZone, infrav1.VSphereFailureDomainValidatedCondition, infrav1.DatastoreNotFoundReason, clusterv1.ConditionSeverityError, "datastore %s is misconfigured", datastore) - v1beta2conditions.Set(deploymentZoneCtx.VSphereDeploymentZone, metav1.Condition{ + deprecatedconditions.MarkFalse(deploymentZoneCtx.VSphereDeploymentZone, infrav1.VSphereFailureDomainValidatedCondition, infrav1.DatastoreNotFoundReason, clusterv1beta1.ConditionSeverityError, "datastore %s is misconfigured", datastore) + deprecatedv1beta2conditions.Set(deploymentZoneCtx.VSphereDeploymentZone, metav1.Condition{ Type: infrav1.VSphereDeploymentZoneFailureDomainValidatedV1Beta2Condition, Status: metav1.ConditionFalse, Reason: infrav1.VSphereDeploymentZoneFailureDomainDatastoreNotFoundV1Beta2Reason, @@ -126,8 +126,8 @@ func (r vsphereDeploymentZoneReconciler) reconcileTopology(ctx context.Context, for _, network := range topology.Networks { if _, err := deploymentZoneCtx.AuthSession.Finder.Network(ctx, network); err != nil { - conditions.MarkFalse(deploymentZoneCtx.VSphereDeploymentZone, infrav1.VSphereFailureDomainValidatedCondition, infrav1.NetworkNotFoundReason, clusterv1.ConditionSeverityError, "network %s is not found", network) - v1beta2conditions.Set(deploymentZoneCtx.VSphereDeploymentZone, metav1.Condition{ + deprecatedconditions.MarkFalse(deploymentZoneCtx.VSphereDeploymentZone, infrav1.VSphereFailureDomainValidatedCondition, infrav1.NetworkNotFoundReason, clusterv1beta1.ConditionSeverityError, "network %s is not found", network) + deprecatedv1beta2conditions.Set(deploymentZoneCtx.VSphereDeploymentZone, metav1.Condition{ Type: infrav1.VSphereDeploymentZoneFailureDomainValidatedV1Beta2Condition, Status: metav1.ConditionFalse, Reason: infrav1.VSphereDeploymentZoneFailureDomainNetworkNotFoundV1Beta2Reason, @@ -139,8 +139,8 @@ func (r vsphereDeploymentZoneReconciler) reconcileTopology(ctx context.Context, for _, networkConfig := range topology.NetworkConfigurations { if _, err := deploymentZoneCtx.AuthSession.Finder.Network(ctx, networkConfig.NetworkName); err != nil { - conditions.MarkFalse(deploymentZoneCtx.VSphereDeploymentZone, infrav1.VSphereFailureDomainValidatedCondition, infrav1.NetworkNotFoundReason, clusterv1.ConditionSeverityError, "network %s is not found", networkConfig.NetworkName) - v1beta2conditions.Set(deploymentZoneCtx.VSphereDeploymentZone, metav1.Condition{ + deprecatedconditions.MarkFalse(deploymentZoneCtx.VSphereDeploymentZone, infrav1.VSphereFailureDomainValidatedCondition, infrav1.NetworkNotFoundReason, clusterv1beta1.ConditionSeverityError, "network %s is not 
found", networkConfig.NetworkName) + deprecatedv1beta2conditions.Set(deploymentZoneCtx.VSphereDeploymentZone, metav1.Condition{ Type: infrav1.VSphereDeploymentZoneFailureDomainValidatedV1Beta2Condition, Status: metav1.ConditionFalse, Reason: infrav1.VSphereDeploymentZoneFailureDomainNetworkNotFoundV1Beta2Reason, @@ -153,8 +153,8 @@ func (r vsphereDeploymentZoneReconciler) reconcileTopology(ctx context.Context, if hostPlacementInfo := topology.Hosts; hostPlacementInfo != nil { rule, err := cluster.VerifyAffinityRule(ctx, deploymentZoneCtx, *topology.ComputeCluster, hostPlacementInfo.HostGroupName, hostPlacementInfo.VMGroupName) if err != nil { - conditions.MarkFalse(deploymentZoneCtx.VSphereDeploymentZone, infrav1.VSphereFailureDomainValidatedCondition, infrav1.HostsMisconfiguredReason, clusterv1.ConditionSeverityError, "vm host affinity does not exist") - v1beta2conditions.Set(deploymentZoneCtx.VSphereDeploymentZone, metav1.Condition{ + deprecatedconditions.MarkFalse(deploymentZoneCtx.VSphereDeploymentZone, infrav1.VSphereFailureDomainValidatedCondition, infrav1.HostsMisconfiguredReason, clusterv1beta1.ConditionSeverityError, "vm host affinity does not exist") + deprecatedv1beta2conditions.Set(deploymentZoneCtx.VSphereDeploymentZone, metav1.Condition{ Type: infrav1.VSphereDeploymentZoneFailureDomainValidatedV1Beta2Condition, Status: metav1.ConditionFalse, Reason: infrav1.VSphereDeploymentZoneFailureDomainHostsMisconfiguredV1Beta2Reason, @@ -179,8 +179,8 @@ func (r vsphereDeploymentZoneReconciler) reconcileComputeCluster(ctx context.Con ccr, err := deploymentZoneCtx.AuthSession.Finder.ClusterComputeResource(ctx, *computeCluster) if err != nil { - conditions.MarkFalse(deploymentZoneCtx.VSphereDeploymentZone, infrav1.VSphereFailureDomainValidatedCondition, infrav1.ComputeClusterNotFoundReason, clusterv1.ConditionSeverityError, "compute cluster %s not found", *computeCluster) - v1beta2conditions.Set(deploymentZoneCtx.VSphereDeploymentZone, metav1.Condition{ + deprecatedconditions.MarkFalse(deploymentZoneCtx.VSphereDeploymentZone, infrav1.VSphereFailureDomainValidatedCondition, infrav1.ComputeClusterNotFoundReason, clusterv1beta1.ConditionSeverityError, "compute cluster %s not found", *computeCluster) + deprecatedv1beta2conditions.Set(deploymentZoneCtx.VSphereDeploymentZone, metav1.Condition{ Type: infrav1.VSphereDeploymentZoneFailureDomainValidatedV1Beta2Condition, Status: metav1.ConditionFalse, Reason: infrav1.VSphereDeploymentZoneFailureDomainComputeClusterNotFoundV1Beta2Reason, @@ -197,8 +197,8 @@ func (r vsphereDeploymentZoneReconciler) reconcileComputeCluster(ctx context.Con ref, err := rp.Owner(ctx) if err != nil { - conditions.MarkFalse(deploymentZoneCtx.VSphereDeploymentZone, infrav1.VSphereFailureDomainValidatedCondition, infrav1.ComputeClusterNotFoundReason, clusterv1.ConditionSeverityError, "resource pool owner not found") - v1beta2conditions.Set(deploymentZoneCtx.VSphereDeploymentZone, metav1.Condition{ + deprecatedconditions.MarkFalse(deploymentZoneCtx.VSphereDeploymentZone, infrav1.VSphereFailureDomainValidatedCondition, infrav1.ComputeClusterNotFoundReason, clusterv1beta1.ConditionSeverityError, "resource pool owner not found") + deprecatedv1beta2conditions.Set(deploymentZoneCtx.VSphereDeploymentZone, metav1.Condition{ Type: infrav1.VSphereDeploymentZoneFailureDomainValidatedV1Beta2Condition, Status: metav1.ConditionFalse, Reason: infrav1.VSphereDeploymentZoneFailureDomainComputeClusterNotFoundV1Beta2Reason, @@ -207,8 +207,8 @@ func (r vsphereDeploymentZoneReconciler) 
reconcileComputeCluster(ctx context.Con return errors.Wrap(err, "unable to find owner compute resource") } if ref.Reference() != ccr.Reference() { - conditions.MarkFalse(deploymentZoneCtx.VSphereDeploymentZone, infrav1.VSphereFailureDomainValidatedCondition, infrav1.ResourcePoolNotFoundReason, clusterv1.ConditionSeverityError, "resource pool is not owned by compute cluster") - v1beta2conditions.Set(deploymentZoneCtx.VSphereDeploymentZone, metav1.Condition{ + deprecatedconditions.MarkFalse(deploymentZoneCtx.VSphereDeploymentZone, infrav1.VSphereFailureDomainValidatedCondition, infrav1.ResourcePoolNotFoundReason, clusterv1beta1.ConditionSeverityError, "resource pool is not owned by compute cluster") + deprecatedv1beta2conditions.Set(deploymentZoneCtx.VSphereDeploymentZone, metav1.Condition{ Type: infrav1.VSphereDeploymentZoneFailureDomainValidatedV1Beta2Condition, Status: metav1.ConditionFalse, Reason: infrav1.VSphereDeploymentZoneFailureDomainResourcePoolNotFoundV1Beta2Reason, diff --git a/controllers/vspheredeploymentzone_controller_test.go b/controllers/vspheredeploymentzone_controller_test.go index 6a87fd8251..0f685aa4f7 100644 --- a/controllers/vspheredeploymentzone_controller_test.go +++ b/controllers/vspheredeploymentzone_controller_test.go @@ -27,9 +27,9 @@ import ( apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/utils/ptr" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1" - "sigs.k8s.io/cluster-api/util/conditions" + bootstrapv1 "sigs.k8s.io/cluster-api/api/bootstrap/kubeadm/v1beta2" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" + deprecatedconditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" "sigs.k8s.io/controller-runtime/pkg/client" infrav1 "sigs.k8s.io/cluster-api-provider-vsphere/apis/v1beta1" @@ -137,9 +137,9 @@ var _ = Describe("VSphereDeploymentZoneReconciler", func() { if err := testEnv.Get(ctx, deploymentZoneKey, vsphereDeploymentZone); err != nil { return false } - return conditions.IsTrue(vsphereDeploymentZone, infrav1.VCenterAvailableCondition) && - conditions.IsTrue(vsphereDeploymentZone, infrav1.PlacementConstraintMetCondition) && - conditions.IsTrue(vsphereDeploymentZone, infrav1.VSphereFailureDomainValidatedCondition) + return deprecatedconditions.IsTrue(vsphereDeploymentZone, infrav1.VCenterAvailableCondition) && + deprecatedconditions.IsTrue(vsphereDeploymentZone, infrav1.PlacementConstraintMetCondition) && + deprecatedconditions.IsTrue(vsphereDeploymentZone, infrav1.VSphereFailureDomainValidatedCondition) }, timeout).Should(BeTrue()) Expect(testEnv.Get(ctx, failureDomainKey, vsphereFailureDomain)).To(Succeed()) @@ -204,7 +204,7 @@ var _ = Describe("VSphereDeploymentZoneReconciler", func() { if err := testEnv.Get(ctx, deploymentZoneKey, vsphereDeploymentZone); err != nil { return false } - return conditions.IsFalse(vsphereDeploymentZone, infrav1.PlacementConstraintMetCondition) + return deprecatedconditions.IsFalse(vsphereDeploymentZone, infrav1.PlacementConstraintMetCondition) }, timeout).Should(BeTrue()) }) }) @@ -381,9 +381,9 @@ func TestVSphereDeploymentZone_Reconcile(t *testing.T) { if err := testEnv.Get(ctx, client.ObjectKeyFromObject(vsphereDeploymentZone), vsphereDeploymentZone); err != nil { return false } - return conditions.IsTrue(vsphereDeploymentZone, infrav1.VCenterAvailableCondition) && - conditions.IsTrue(vsphereDeploymentZone, infrav1.PlacementConstraintMetCondition) && - 
conditions.IsTrue(vsphereDeploymentZone, infrav1.VSphereFailureDomainValidatedCondition) + return deprecatedconditions.IsTrue(vsphereDeploymentZone, infrav1.VCenterAvailableCondition) && + deprecatedconditions.IsTrue(vsphereDeploymentZone, infrav1.PlacementConstraintMetCondition) && + deprecatedconditions.IsTrue(vsphereDeploymentZone, infrav1.VSphereFailureDomainValidatedCondition) }, timeout).Should(BeTrue()) g.Expect(testEnv.Get(ctx, client.ObjectKeyFromObject(vsphereFailureDomain), vsphereFailureDomain)).To(Succeed()) diff --git a/controllers/vspheremachine_controller.go b/controllers/vspheremachine_controller.go index 3f40556959..4f8fd62062 100644 --- a/controllers/vspheremachine_controller.go +++ b/controllers/vspheremachine_controller.go @@ -32,14 +32,16 @@ import ( "k8s.io/apimachinery/pkg/util/validation" "k8s.io/client-go/tools/record" "k8s.io/klog/v2" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" clusterutilv1 "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/conditions" - v1beta2conditions "sigs.k8s.io/cluster-api/util/conditions/v1beta2" + deprecatedconditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" + deprecatedv1beta2conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions/v1beta2" + "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/patch" + "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/paused" "sigs.k8s.io/cluster-api/util/finalizers" clog "sigs.k8s.io/cluster-api/util/log" - "sigs.k8s.io/cluster-api/util/patch" - "sigs.k8s.io/cluster-api/util/paused" "sigs.k8s.io/cluster-api/util/predicates" ctrl "sigs.k8s.io/controller-runtime" ctrlbldr "sigs.k8s.io/controller-runtime/pkg/builder" @@ -113,7 +115,7 @@ func AddMachineControllerToManager(ctx context.Context, controllerManagerContext &clusterv1.Cluster{}, handler.EnqueueRequestsFromMapFunc(r.enqueueClusterToMachineRequests), ctrlbldr.WithPredicates( - predicates.ClusterPausedTransitionsOrInfrastructureReady(mgr.GetScheme(), predicateLog), + predicates.ClusterPausedTransitionsOrInfrastructureProvisioned(mgr.GetScheme(), predicateLog), ), ). // Watch a GenericEvent channel for the controlled resource. @@ -170,7 +172,7 @@ func AddMachineControllerToManager(ctx context.Context, controllerManagerContext &clusterv1.Cluster{}, handler.EnqueueRequestsFromMapFunc(r.enqueueClusterToMachineRequests), ctrlbldr.WithPredicates( - predicates.ClusterPausedTransitionsOrInfrastructureReady(mgr.GetScheme(), predicateLog), + predicates.ClusterPausedTransitionsOrInfrastructureProvisioned(mgr.GetScheme(), predicateLog), ), ).Complete(r) } @@ -253,15 +255,15 @@ func (r *machineReconciler) Reconcile(ctx context.Context, req ctrl.Request) (_ // Before computing ready condition, make sure that VirtualMachineProvisioned is always set. // NOTE: This is required because v1beta2 conditions comply to guideline requiring conditions to be set at the // first reconcile. 
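// --- Illustrative sketch, not part of the patch: the hunk that follows applies the
// "default the condition when it is missing" pattern described in the NOTE above, using the
// deprecated condition helpers this PR migrates to. Package name and helper function are
// hypothetical; the types and constants are the ones used by the surrounding code.
package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	deprecatedv1beta2conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions/v1beta2"

	infrav1 "sigs.k8s.io/cluster-api-provider-vsphere/apis/v1beta1"
)

// defaultProvisionedCondition sets VirtualMachineProvisioned on the first reconcile if no
// value has been computed yet, so summary conditions always have an input to work with.
func defaultProvisionedCondition(vsphereMachine *infrav1.VSphereMachine, ready bool) {
	if c := deprecatedv1beta2conditions.Get(vsphereMachine, infrav1.VSphereMachineVirtualMachineProvisionedV1Beta2Condition); c != nil {
		// Already set by an earlier step of this reconcile; leave it untouched.
		return
	}
	status, reason := metav1.ConditionFalse, infrav1.VSphereMachineVirtualMachineNotProvisionedV1Beta2Reason
	if ready {
		status, reason = metav1.ConditionTrue, infrav1.VSphereMachineVirtualMachineProvisionedV1Beta2Reason
	}
	deprecatedv1beta2conditions.Set(vsphereMachine, metav1.Condition{
		Type:   infrav1.VSphereMachineVirtualMachineProvisionedV1Beta2Condition,
		Status: status,
		Reason: reason,
	})
}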
- if c := v1beta2conditions.Get(machineContext.GetVSphereMachine(), infrav1.VSphereMachineVirtualMachineProvisionedV1Beta2Condition); c == nil { + if c := deprecatedv1beta2conditions.Get(machineContext.GetVSphereMachine(), infrav1.VSphereMachineVirtualMachineProvisionedV1Beta2Condition); c == nil { if machineContext.GetReady() { - v1beta2conditions.Set(machineContext.GetVSphereMachine(), metav1.Condition{ + deprecatedv1beta2conditions.Set(machineContext.GetVSphereMachine(), metav1.Condition{ Type: infrav1.VSphereMachineVirtualMachineProvisionedV1Beta2Condition, Status: metav1.ConditionTrue, Reason: infrav1.VSphereMachineVirtualMachineProvisionedV1Beta2Reason, }) } else { - v1beta2conditions.Set(machineContext.GetVSphereMachine(), metav1.Condition{ + deprecatedv1beta2conditions.Set(machineContext.GetVSphereMachine(), metav1.Condition{ Type: infrav1.VSphereMachineVirtualMachineProvisionedV1Beta2Condition, Status: metav1.ConditionFalse, Reason: infrav1.VSphereMachineVirtualMachineNotProvisionedV1Beta2Reason, @@ -270,21 +272,21 @@ func (r *machineReconciler) Reconcile(ctx context.Context, req ctrl.Request) (_ } // always update the readyCondition. - conditions.SetSummary(machineContext.GetVSphereMachine(), - conditions.WithConditions( + deprecatedconditions.SetSummary(machineContext.GetVSphereMachine(), + deprecatedconditions.WithConditions( infrav1.VMProvisionedCondition, ), ) - if err := v1beta2conditions.SetSummaryCondition(machineContext.GetVSphereMachine(), machineContext.GetVSphereMachine(), infrav1.VSphereMachineReadyV1Beta2Condition, - v1beta2conditions.ForConditionTypes{ + if err := deprecatedv1beta2conditions.SetSummaryCondition(machineContext.GetVSphereMachine(), machineContext.GetVSphereMachine(), infrav1.VSphereMachineReadyV1Beta2Condition, + deprecatedv1beta2conditions.ForConditionTypes{ infrav1.VSphereMachineVirtualMachineProvisionedV1Beta2Condition, }, // Using a custom merge strategy to override reasons applied during merge. - v1beta2conditions.CustomMergeStrategy{ - MergeStrategy: v1beta2conditions.DefaultMergeStrategy( + deprecatedv1beta2conditions.CustomMergeStrategy{ + MergeStrategy: deprecatedv1beta2conditions.DefaultMergeStrategy( // Use custom reasons. 
- v1beta2conditions.ComputeReasonFunc(v1beta2conditions.GetDefaultComputeMergeReasonFunc( + deprecatedv1beta2conditions.ComputeReasonFunc(deprecatedv1beta2conditions.GetDefaultComputeMergeReasonFunc( infrav1.VSphereMachineNotReadyV1Beta2Reason, infrav1.VSphereMachineReadyUnknownV1Beta2Reason, infrav1.VSphereMachineReadyV1Beta2Reason, @@ -345,8 +347,8 @@ func (r *machineReconciler) Reconcile(ctx context.Context, req ctrl.Request) (_ func (r *machineReconciler) reconcileDelete(ctx context.Context, machineCtx capvcontext.MachineContext) (reconcile.Result, error) { log := ctrl.LoggerFrom(ctx) - conditions.MarkFalse(machineCtx.GetVSphereMachine(), infrav1.VMProvisionedCondition, clusterv1.DeletingReason, clusterv1.ConditionSeverityInfo, "") - v1beta2conditions.Set(machineCtx.GetVSphereMachine(), metav1.Condition{ + deprecatedconditions.MarkFalse(machineCtx.GetVSphereMachine(), infrav1.VMProvisionedCondition, clusterv1beta1.DeletingReason, clusterv1beta1.ConditionSeverityInfo, "") + deprecatedv1beta2conditions.Set(machineCtx.GetVSphereMachine(), metav1.Condition{ Type: infrav1.VSphereMachineVirtualMachineProvisionedV1Beta2Condition, Status: metav1.ConditionFalse, Reason: infrav1.VSphereMachineVirtualMachineDeletingV1Beta2Reason, @@ -360,8 +362,8 @@ func (r *machineReconciler) reconcileDelete(ctx context.Context, machineCtx capv } return reconcile.Result{}, nil } - conditions.MarkFalse(machineCtx.GetVSphereMachine(), infrav1.VMProvisionedCondition, clusterv1.DeletionFailedReason, clusterv1.ConditionSeverityWarning, "") - v1beta2conditions.Set(machineCtx.GetVSphereMachine(), metav1.Condition{ + deprecatedconditions.MarkFalse(machineCtx.GetVSphereMachine(), infrav1.VMProvisionedCondition, clusterv1beta1.DeletionFailedReason, clusterv1beta1.ConditionSeverityWarning, "") + deprecatedv1beta2conditions.Set(machineCtx.GetVSphereMachine(), metav1.Condition{ Type: infrav1.VSphereMachineVirtualMachineProvisionedV1Beta2Condition, Status: metav1.ConditionFalse, Reason: infrav1.VSphereMachineVirtualMachineDeletingV1Beta2Reason, @@ -389,15 +391,15 @@ func (r *machineReconciler) reconcileNormal(ctx context.Context, machineCtx capv return reconcile.Result{}, nil } - // Cluster `.status.infrastructureReady == false is handled differently depending on if the machine is supervisor based. + // Cluster `.status.initialization.infrastructureProvisioned == false is handled differently depending on if the machine is supervisor based. // 1) If the Cluster is not supervisor-based mark the VMProvisionedCondition false and return nil. - // 2) If the Cluster is supervisor-based continue to reconcile as InfrastructureReady is not set to true until after the kube apiserver is available. + // 2) If the Cluster is supervisor-based continue to reconcile as InfrastructureProvisioned is not set to true until after the kube apiserver is available. if !r.supervisorBased { // vmwarev1.VSphereCluster doesn't set Cluster.Status.Ready until the API endpoint is available. 
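// --- Illustrative sketch, not part of the patch: in the v1beta2 Cluster API used below,
// Cluster.Status.InfrastructureReady is replaced by the optional Status.Initialization
// struct, so the check has to be nil-safe. Package and helper names are hypothetical; the
// field layout is the one this patch relies on.
package example

import clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2"

// infrastructureProvisioned treats an unset Initialization block as "not provisioned yet",
// matching the check introduced in the hunk below.
func infrastructureProvisioned(cluster *clusterv1.Cluster) bool {
	return cluster.Status.Initialization != nil && cluster.Status.Initialization.InfrastructureProvisioned
}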
- if !machineCtx.GetCluster().Status.InfrastructureReady { + if machineCtx.GetCluster().Status.Initialization == nil || !machineCtx.GetCluster().Status.Initialization.InfrastructureProvisioned { log.Info("Cluster infrastructure is not ready yet, skipping reconciliation") - conditions.MarkFalse(machineCtx.GetVSphereMachine(), infrav1.VMProvisionedCondition, infrav1.WaitingForClusterInfrastructureReason, clusterv1.ConditionSeverityInfo, "") - v1beta2conditions.Set(machineCtx.GetVSphereMachine(), metav1.Condition{ + deprecatedconditions.MarkFalse(machineCtx.GetVSphereMachine(), infrav1.VMProvisionedCondition, infrav1.WaitingForClusterInfrastructureReason, clusterv1beta1.ConditionSeverityInfo, "") + deprecatedv1beta2conditions.Set(machineCtx.GetVSphereMachine(), metav1.Condition{ Type: infrav1.VSphereMachineVirtualMachineProvisionedV1Beta2Condition, Status: metav1.ConditionFalse, Reason: infrav1.VSphereMachineVirtualMachineWaitingForClusterInfrastructureReadyV1Beta2Reason, @@ -412,10 +414,10 @@ func (r *machineReconciler) reconcileNormal(ctx context.Context, machineCtx capv // Make sure bootstrap data is available and populated. if machineCtx.GetMachine().Spec.Bootstrap.DataSecretName == nil { - if !util.IsControlPlaneMachine(machineCtx.GetVSphereMachine()) && !conditions.IsTrue(machineCtx.GetCluster(), clusterv1.ControlPlaneInitializedCondition) { + if !util.IsControlPlaneMachine(machineCtx.GetVSphereMachine()) && !conditions.IsTrue(machineCtx.GetCluster(), clusterv1.ClusterControlPlaneInitializedCondition) { log.Info("Waiting for the control plane to be initialized, skipping reconciliation") - conditions.MarkFalse(machineCtx.GetVSphereMachine(), infrav1.VMProvisionedCondition, clusterv1.WaitingForControlPlaneAvailableReason, clusterv1.ConditionSeverityInfo, "") - v1beta2conditions.Set(machineCtx.GetVSphereMachine(), metav1.Condition{ + deprecatedconditions.MarkFalse(machineCtx.GetVSphereMachine(), infrav1.VMProvisionedCondition, clusterv1beta1.WaitingForControlPlaneAvailableReason, clusterv1beta1.ConditionSeverityInfo, "") + deprecatedv1beta2conditions.Set(machineCtx.GetVSphereMachine(), metav1.Condition{ Type: infrav1.VSphereMachineVirtualMachineProvisionedV1Beta2Condition, Status: metav1.ConditionFalse, Reason: infrav1.VSphereMachineVirtualMachineWaitingForControlPlaneInitializedV1Beta2Reason, @@ -423,8 +425,8 @@ func (r *machineReconciler) reconcileNormal(ctx context.Context, machineCtx capv return ctrl.Result{}, nil } log.Info("Waiting for bootstrap data to be ready, skipping reconciliation") - conditions.MarkFalse(machineCtx.GetVSphereMachine(), infrav1.VMProvisionedCondition, infrav1.WaitingForBootstrapDataReason, clusterv1.ConditionSeverityInfo, "") - v1beta2conditions.Set(machineCtx.GetVSphereMachine(), metav1.Condition{ + deprecatedconditions.MarkFalse(machineCtx.GetVSphereMachine(), infrav1.VMProvisionedCondition, infrav1.WaitingForBootstrapDataReason, clusterv1beta1.ConditionSeverityInfo, "") + deprecatedv1beta2conditions.Set(machineCtx.GetVSphereMachine(), metav1.Condition{ Type: infrav1.VSphereMachineVirtualMachineProvisionedV1Beta2Condition, Status: metav1.ConditionFalse, Reason: infrav1.VSphereMachineVirtualMachineWaitingForBootstrapDataV1Beta2Reason, @@ -447,8 +449,8 @@ func (r *machineReconciler) reconcileNormal(ctx context.Context, machineCtx capv return reconcile.Result{}, errors.Wrapf(err, "failed to patch Machine with host info label") } - conditions.MarkTrue(machineCtx.GetVSphereMachine(), infrav1.VMProvisionedCondition) - 
v1beta2conditions.Set(machineCtx.GetVSphereMachine(), metav1.Condition{ + deprecatedconditions.MarkTrue(machineCtx.GetVSphereMachine(), infrav1.VMProvisionedCondition) + deprecatedv1beta2conditions.Set(machineCtx.GetVSphereMachine(), metav1.Condition{ Type: infrav1.VSphereMachineVirtualMachineProvisionedV1Beta2Condition, Status: metav1.ConditionTrue, Reason: infrav1.VSphereMachineVirtualMachineProvisionedV1Beta2Reason, diff --git a/controllers/vspheremachine_controller_test.go b/controllers/vspheremachine_controller_test.go index d83a9c5393..a7757e2d4e 100644 --- a/controllers/vspheremachine_controller_test.go +++ b/controllers/vspheremachine_controller_test.go @@ -24,10 +24,11 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/utils/ptr" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" capiutil "sigs.k8s.io/cluster-api/util" - "sigs.k8s.io/cluster-api/util/conditions" - "sigs.k8s.io/cluster-api/util/patch" + deprecatedconditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" + "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/patch" "sigs.k8s.io/controller-runtime/pkg/client" ctrlutil "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" @@ -46,12 +47,12 @@ var _ = Describe("VsphereMachineReconciler", func() { key client.ObjectKey ) - isPresentAndFalseWithReason := func(getter conditions.Getter, condition clusterv1.ConditionType, reason string) bool { + isPresentAndFalseWithReason := func(getter deprecatedconditions.Getter, condition clusterv1beta1.ConditionType, reason string) bool { ExpectWithOffset(1, testEnv.Get(ctx, key, getter)).To(Succeed()) - if !conditions.Has(getter, condition) { + if !deprecatedconditions.Has(getter, condition) { return false } - objectCondition := conditions.Get(getter, condition) + objectCondition := deprecatedconditions.Get(getter, condition) return objectCondition.Status == corev1.ConditionFalse && objectCondition.Reason == reason } @@ -164,7 +165,10 @@ var _ = Describe("VsphereMachineReconciler", func() { Eventually(func() error { ph, err := patch.NewHelper(capiCluster, testEnv) Expect(err).ShouldNot(HaveOccurred()) - capiCluster.Status.InfrastructureReady = true + if capiCluster.Status.Initialization == nil { + capiCluster.Status.Initialization = &clusterv1.ClusterInitializationStatus{} + } + capiCluster.Status.Initialization.InfrastructureProvisioned = true return ph.Patch(ctx, capiCluster, patch.WithStatusObservedGeneration{}) }, timeout).Should(Succeed()) @@ -177,7 +181,10 @@ var _ = Describe("VsphereMachineReconciler", func() { BeforeEach(func() { ph, err := patch.NewHelper(capiCluster, testEnv) Expect(err).ShouldNot(HaveOccurred()) - capiCluster.Status.InfrastructureReady = true + if capiCluster.Status.Initialization == nil { + capiCluster.Status.Initialization = &clusterv1.ClusterInitializationStatus{} + } + capiCluster.Status.Initialization.InfrastructureProvisioned = true Expect(ph.Patch(ctx, capiCluster, patch.WithStatusObservedGeneration{})).To(Succeed()) }) diff --git a/controllers/vspherevm_controller.go b/controllers/vspherevm_controller.go index 0c0cef52c8..1a1b3ca103 100644 --- a/controllers/vspherevm_controller.go +++ b/controllers/vspherevm_controller.go @@ -31,16 +31,17 @@ import ( "k8s.io/client-go/tools/record" "k8s.io/klog/v2" "k8s.io/utils/ptr" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + 
clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" + ipamv1beta1 "sigs.k8s.io/cluster-api/api/ipam/v1beta1" "sigs.k8s.io/cluster-api/controllers/clustercache" - ipamv1 "sigs.k8s.io/cluster-api/exp/ipam/api/v1beta1" clusterutilv1 "sigs.k8s.io/cluster-api/util" - "sigs.k8s.io/cluster-api/util/conditions" - v1beta2conditions "sigs.k8s.io/cluster-api/util/conditions/v1beta2" + deprecatedconditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" + deprecatedv1beta2conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions/v1beta2" + "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/patch" + "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/paused" "sigs.k8s.io/cluster-api/util/finalizers" clog "sigs.k8s.io/cluster-api/util/log" - "sigs.k8s.io/cluster-api/util/patch" - "sigs.k8s.io/cluster-api/util/paused" "sigs.k8s.io/cluster-api/util/predicates" ctrl "sigs.k8s.io/controller-runtime" ctrlbldr "sigs.k8s.io/controller-runtime/pkg/builder" @@ -118,7 +119,7 @@ func AddVMControllerToManager(ctx context.Context, controllerManagerCtx *capvcon }), ). Watches( - &ipamv1.IPAddressClaim{}, + &ipamv1beta1.IPAddressClaim{}, handler.EnqueueRequestsFromMapFunc(r.ipAddressClaimToVSphereVM), ). WatchesRawSource(r.clusterCache.GetClusterSource("vspherevm", r.clusterToVSphereVMs)). @@ -172,8 +173,8 @@ func (r vmReconciler) Reconcile(ctx context.Context, req ctrl.Request) (_ ctrl.R authSession, err := r.retrieveVcenterSession(ctx, vsphereVM) if err != nil { - conditions.MarkFalse(vsphereVM, infrav1.VCenterAvailableCondition, infrav1.VCenterUnreachableReason, clusterv1.ConditionSeverityError, err.Error()) - v1beta2conditions.Set(vsphereVM, metav1.Condition{ + deprecatedconditions.MarkFalse(vsphereVM, infrav1.VCenterAvailableCondition, infrav1.VCenterUnreachableReason, clusterv1beta1.ConditionSeverityError, "%v", err) + deprecatedv1beta2conditions.Set(vsphereVM, metav1.Condition{ Type: infrav1.VSphereVMVCenterAvailableV1Beta2Condition, Status: metav1.ConditionFalse, Reason: infrav1.VSphereVMVCenterUnreachableV1Beta2Reason, @@ -181,8 +182,8 @@ func (r vmReconciler) Reconcile(ctx context.Context, req ctrl.Request) (_ ctrl.R }) return reconcile.Result{}, err } - conditions.MarkTrue(vsphereVM, infrav1.VCenterAvailableCondition) - v1beta2conditions.Set(vsphereVM, metav1.Condition{ + deprecatedconditions.MarkTrue(vsphereVM, infrav1.VCenterAvailableCondition) + deprecatedv1beta2conditions.Set(vsphereVM, metav1.Condition{ Type: infrav1.VSphereVMVCenterAvailableV1Beta2Condition, Status: metav1.ConditionTrue, Reason: infrav1.VSphereVMVCenterAvailableV1Beta2Reason, @@ -267,15 +268,15 @@ func (r vmReconciler) Reconcile(ctx context.Context, req ctrl.Request) (_ ctrl.R // Before computing ready condition, make sure that VirtualMachineProvisioned is always set. // NOTE: This is required because v1beta2 conditions comply to guideline requiring conditions to be set at the // first reconcile. 
- if c := v1beta2conditions.Get(vmContext.VSphereVM, infrav1.VSphereVMVirtualMachineProvisionedV1Beta2Condition); c != nil { + if c := deprecatedv1beta2conditions.Get(vmContext.VSphereVM, infrav1.VSphereVMVirtualMachineProvisionedV1Beta2Condition); c != nil { if vmContext.VSphereVM.Status.Ready { - v1beta2conditions.Set(vmContext.VSphereVM, metav1.Condition{ + deprecatedv1beta2conditions.Set(vmContext.VSphereVM, metav1.Condition{ Type: infrav1.VSphereVMVirtualMachineProvisionedV1Beta2Condition, Status: metav1.ConditionTrue, Reason: infrav1.VSphereVMVirtualMachineProvisionedV1Beta2Reason, }) } else { - v1beta2conditions.Set(vmContext.VSphereVM, metav1.Condition{ + deprecatedv1beta2conditions.Set(vmContext.VSphereVM, metav1.Condition{ Type: infrav1.VSphereVMVirtualMachineProvisionedV1Beta2Condition, Status: metav1.ConditionFalse, Reason: infrav1.VSphereVMVirtualMachineNotProvisionedV1Beta2Reason, @@ -284,29 +285,29 @@ func (r vmReconciler) Reconcile(ctx context.Context, req ctrl.Request) (_ ctrl.R } // always update the readyCondition. - conditions.SetSummary(vmContext.VSphereVM, - conditions.WithConditions( + deprecatedconditions.SetSummary(vmContext.VSphereVM, + deprecatedconditions.WithConditions( infrav1.VCenterAvailableCondition, infrav1.IPAddressClaimedCondition, infrav1.VMProvisionedCondition, ), ) - if err := v1beta2conditions.SetSummaryCondition(vmContext.VSphereVM, vmContext.VSphereVM, infrav1.VSphereVMReadyV1Beta2Condition, - v1beta2conditions.ForConditionTypes{ + if err := deprecatedv1beta2conditions.SetSummaryCondition(vmContext.VSphereVM, vmContext.VSphereVM, infrav1.VSphereVMReadyV1Beta2Condition, + deprecatedv1beta2conditions.ForConditionTypes{ infrav1.VSphereVMVCenterAvailableV1Beta2Condition, infrav1.VSphereVMVirtualMachineProvisionedV1Beta2Condition, infrav1.VSphereVMIPAddressClaimsFulfilledV1Beta2Condition, }, - v1beta2conditions.IgnoreTypesIfMissing{ + deprecatedv1beta2conditions.IgnoreTypesIfMissing{ infrav1.VSphereVMVCenterAvailableV1Beta2Condition, infrav1.VSphereVMIPAddressClaimsFulfilledV1Beta2Condition, }, // Using a custom merge strategy to override reasons applied during merge. - v1beta2conditions.CustomMergeStrategy{ - MergeStrategy: v1beta2conditions.DefaultMergeStrategy( + deprecatedv1beta2conditions.CustomMergeStrategy{ + MergeStrategy: deprecatedv1beta2conditions.DefaultMergeStrategy( // Use custom reasons. 
- v1beta2conditions.ComputeReasonFunc(v1beta2conditions.GetDefaultComputeMergeReasonFunc( + deprecatedv1beta2conditions.ComputeReasonFunc(deprecatedv1beta2conditions.GetDefaultComputeMergeReasonFunc( infrav1.VSphereVMNotReadyV1Beta2Reason, infrav1.VSphereVMReadyUnknownV1Beta2Reason, infrav1.VSphereVMReadyV1Beta2Reason, @@ -376,16 +377,16 @@ func (r vmReconciler) reconcile(ctx context.Context, vmCtx *capvcontext.VMContex func (r vmReconciler) reconcileDelete(ctx context.Context, vmCtx *capvcontext.VMContext) (reconcile.Result, error) { log := ctrl.LoggerFrom(ctx) - conditions.MarkFalse(vmCtx.VSphereVM, infrav1.VMProvisionedCondition, clusterv1.DeletingReason, clusterv1.ConditionSeverityInfo, "") - v1beta2conditions.Set(vmCtx.VSphereVM, metav1.Condition{ + deprecatedconditions.MarkFalse(vmCtx.VSphereVM, infrav1.VMProvisionedCondition, clusterv1beta1.DeletingReason, clusterv1beta1.ConditionSeverityInfo, "") + deprecatedv1beta2conditions.Set(vmCtx.VSphereVM, metav1.Condition{ Type: infrav1.VSphereVMVirtualMachineProvisionedV1Beta2Condition, Status: metav1.ConditionFalse, Reason: infrav1.VSphereVMVirtualMachineDeletingV1Beta2Reason, }) result, vm, err := r.VMService.DestroyVM(ctx, vmCtx) if err != nil { - conditions.MarkFalse(vmCtx.VSphereVM, infrav1.VMProvisionedCondition, "DeletionFailed", clusterv1.ConditionSeverityWarning, err.Error()) - v1beta2conditions.Set(vmCtx.VSphereVM, metav1.Condition{ + deprecatedconditions.MarkFalse(vmCtx.VSphereVM, infrav1.VMProvisionedCondition, "DeletionFailed", clusterv1beta1.ConditionSeverityWarning, "%v", err) + deprecatedv1beta2conditions.Set(vmCtx.VSphereVM, metav1.Condition{ Type: infrav1.VSphereVMVirtualMachineProvisionedV1Beta2Condition, Status: metav1.ConditionFalse, Reason: infrav1.VSphereVMVirtualMachineDeletingV1Beta2Reason, @@ -468,8 +469,8 @@ func (r vmReconciler) reconcileNormal(ctx context.Context, vmCtx *capvcontext.VM } if r.isWaitingForStaticIPAllocation(vmCtx) { - conditions.MarkFalse(vmCtx.VSphereVM, infrav1.VMProvisionedCondition, infrav1.WaitingForStaticIPAllocationReason, clusterv1.ConditionSeverityInfo, "") - v1beta2conditions.Set(vmCtx.VSphereVM, metav1.Condition{ + deprecatedconditions.MarkFalse(vmCtx.VSphereVM, infrav1.VMProvisionedCondition, infrav1.WaitingForStaticIPAllocationReason, clusterv1beta1.ConditionSeverityInfo, "") + deprecatedv1beta2conditions.Set(vmCtx.VSphereVM, metav1.Condition{ Type: infrav1.VSphereVMVirtualMachineProvisionedV1Beta2Condition, Status: metav1.ConditionFalse, Reason: infrav1.VSphereVMVirtualMachineWaitingForStaticIPAllocationV1Beta2Reason, @@ -519,8 +520,8 @@ func (r vmReconciler) reconcileNormal(ctx context.Context, vmCtx *capvcontext.VM // we didn't get any addresses, requeue if len(vmCtx.VSphereVM.Status.Addresses) == 0 { - conditions.MarkFalse(vmCtx.VSphereVM, infrav1.VMProvisionedCondition, infrav1.WaitingForIPAllocationReason, clusterv1.ConditionSeverityInfo, "") - v1beta2conditions.Set(vmCtx.VSphereVM, metav1.Condition{ + deprecatedconditions.MarkFalse(vmCtx.VSphereVM, infrav1.VMProvisionedCondition, infrav1.WaitingForIPAllocationReason, clusterv1beta1.ConditionSeverityInfo, "") + deprecatedv1beta2conditions.Set(vmCtx.VSphereVM, metav1.Condition{ Type: infrav1.VSphereVMVirtualMachineProvisionedV1Beta2Condition, Status: metav1.ConditionFalse, Reason: infrav1.VSphereVMVirtualMachineWaitingForIPAllocationV1Beta2Reason, @@ -530,8 +531,8 @@ func (r vmReconciler) reconcileNormal(ctx context.Context, vmCtx *capvcontext.VM // Once the network is online the VM is considered ready. 
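// --- Illustrative sketch, not part of the patch: during the v1beta1 -> v1beta2 migration
// both condition families are kept in sync, which is why the Reconcile hunks above update
// the legacy Ready summary and the v1beta2 summary condition back to back. Package and
// function names are hypothetical; options and condition types are taken from the
// surrounding code (the CustomMergeStrategy shown above is omitted here for brevity).
package example

import (
	deprecatedconditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions"
	deprecatedv1beta2conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions/v1beta2"

	infrav1 "sigs.k8s.io/cluster-api-provider-vsphere/apis/v1beta1"
)

func updateReadySummaries(vsphereVM *infrav1.VSphereVM) error {
	// Legacy (v1beta1) Ready condition, summarized from the provider conditions.
	deprecatedconditions.SetSummary(vsphereVM,
		deprecatedconditions.WithConditions(
			infrav1.VCenterAvailableCondition,
			infrav1.IPAddressClaimedCondition,
			infrav1.VMProvisionedCondition,
		),
	)
	// v1beta2 Ready condition; optional conditions that were never set are ignored rather
	// than dragging the summary to Unknown.
	return deprecatedv1beta2conditions.SetSummaryCondition(vsphereVM, vsphereVM, infrav1.VSphereVMReadyV1Beta2Condition,
		deprecatedv1beta2conditions.ForConditionTypes{
			infrav1.VSphereVMVCenterAvailableV1Beta2Condition,
			infrav1.VSphereVMVirtualMachineProvisionedV1Beta2Condition,
			infrav1.VSphereVMIPAddressClaimsFulfilledV1Beta2Condition,
		},
		deprecatedv1beta2conditions.IgnoreTypesIfMissing{
			infrav1.VSphereVMVCenterAvailableV1Beta2Condition,
			infrav1.VSphereVMIPAddressClaimsFulfilledV1Beta2Condition,
		},
	)
}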
vmCtx.VSphereVM.Status.Ready = true - conditions.MarkTrue(vmCtx.VSphereVM, infrav1.VMProvisionedCondition) - v1beta2conditions.Set(vmCtx.VSphereVM, metav1.Condition{ + deprecatedconditions.MarkTrue(vmCtx.VSphereVM, infrav1.VMProvisionedCondition) + deprecatedv1beta2conditions.Set(vmCtx.VSphereVM, metav1.Condition{ Type: infrav1.VSphereVMVirtualMachineProvisionedV1Beta2Condition, Status: metav1.ConditionTrue, Reason: infrav1.VSphereVMVirtualMachineProvisionedV1Beta2Reason, @@ -631,7 +632,7 @@ func (r vmReconciler) vsphereClusterToVSphereVMs(ctx context.Context, a ctrlclie } func (r vmReconciler) ipAddressClaimToVSphereVM(_ context.Context, a ctrlclient.Object) []reconcile.Request { - ipAddressClaim, ok := a.(*ipamv1.IPAddressClaim) + ipAddressClaim, ok := a.(*ipamv1beta1.IPAddressClaim) if !ok { return nil } diff --git a/controllers/vspherevm_controller_test.go b/controllers/vspherevm_controller_test.go index 424cecb01f..41da50b877 100644 --- a/controllers/vspherevm_controller_test.go +++ b/controllers/vspherevm_controller_test.go @@ -29,11 +29,11 @@ import ( apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" apirecord "k8s.io/client-go/tools/record" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" + ipamv1beta1 "sigs.k8s.io/cluster-api/api/ipam/v1beta1" "sigs.k8s.io/cluster-api/controllers/clustercache" - ipamv1 "sigs.k8s.io/cluster-api/exp/ipam/api/v1beta1" "sigs.k8s.io/cluster-api/util" - "sigs.k8s.io/cluster-api/util/conditions" + deprecatedconditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/reconcile" @@ -58,7 +58,7 @@ func TestReconcileNormal_WaitingForIPAddrAllocation(t *testing.T) { vsphereCluster *infrav1.VSphereCluster initObjs []client.Object - ipAddressClaim *ipamv1.IPAddressClaim + ipAddressClaim *ipamv1beta1.IPAddressClaim ) poolAPIGroup := "some.ipam.api.group" @@ -92,7 +92,9 @@ func TestReconcileNormal_WaitingForIPAddrAllocation(t *testing.T) { }, }, Status: clusterv1.ClusterStatus{ - InfrastructureReady: true, + Initialization: &clusterv1.ClusterInitializationStatus{ + InfrastructureProvisioned: true, + }, }, } @@ -143,7 +145,7 @@ func TestReconcileNormal_WaitingForIPAddrAllocation(t *testing.T) { Status: infrav1.VSphereVMStatus{}, } - ipAddressClaim = &ipamv1.IPAddressClaim{ + ipAddressClaim = &ipamv1beta1.IPAddressClaim{ TypeMeta: metav1.TypeMeta{ Kind: "IPAddressClaim", }, @@ -155,7 +157,7 @@ func TestReconcileNormal_WaitingForIPAddrAllocation(t *testing.T) { }, OwnerReferences: []metav1.OwnerReference{{APIVersion: infrav1.GroupVersion.String(), Kind: "VSphereVM", Name: "foo"}}, }, - Spec: ipamv1.IPAddressClaimSpec{ + Spec: ipamv1beta1.IPAddressClaimSpec{ PoolRef: corev1.TypedLocalObjectReference{ APIGroup: &poolAPIGroup, Kind: "IPAMPools", @@ -206,8 +208,8 @@ func TestReconcileNormal_WaitingForIPAddrAllocation(t *testing.T) { vmKey := util.ObjectKey(vsphereVM) g.Expect(r.Client.Get(context.Background(), vmKey, vm)).NotTo(HaveOccurred()) - g.Expect(conditions.Has(vm, infrav1.VMProvisionedCondition)).To(BeTrue()) - vmProvisionCondition := conditions.Get(vm, infrav1.VMProvisionedCondition) + g.Expect(deprecatedconditions.Has(vm, infrav1.VMProvisionedCondition)).To(BeTrue()) + vmProvisionCondition := deprecatedconditions.Get(vm, infrav1.VMProvisionedCondition) g.Expect(vmProvisionCondition.Status).To(Equal(corev1.ConditionFalse)) 
g.Expect(vmProvisionCondition.Reason).To(Equal(infrav1.WaitingForStaticIPAllocationReason)) }) @@ -242,8 +244,8 @@ func TestReconcileNormal_WaitingForIPAddrAllocation(t *testing.T) { vmKey := util.ObjectKey(vsphereVM) g.Expect(r.Client.Get(context.Background(), vmKey, vm)).NotTo(HaveOccurred()) - g.Expect(conditions.Has(vm, infrav1.VMProvisionedCondition)).To(BeTrue()) - vmProvisionCondition := conditions.Get(vm, infrav1.VMProvisionedCondition) + g.Expect(deprecatedconditions.Has(vm, infrav1.VMProvisionedCondition)).To(BeTrue()) + vmProvisionCondition := deprecatedconditions.Get(vm, infrav1.VMProvisionedCondition) g.Expect(vmProvisionCondition.Status).To(Equal(corev1.ConditionFalse)) g.Expect(vmProvisionCondition.Reason).To(Equal(infrav1.WaitingForIPAllocationReason)) }) @@ -292,7 +294,7 @@ func TestReconcileNormal_WaitingForIPAddrAllocation(t *testing.T) { vmKey := util.ObjectKey(vsphereVM) g.Expect(apierrors.IsNotFound(r.Client.Get(context.Background(), vmKey, vm))).To(BeTrue()) - claim := &ipamv1.IPAddressClaim{} + claim := &ipamv1beta1.IPAddressClaim{} ipacKey := util.ObjectKey(ipAddressClaim) g.Expect(r.Client.Get(context.Background(), ipacKey, claim)).NotTo(HaveOccurred()) g.Expect(claim.ObjectMeta.Finalizers).NotTo(ContainElement(infrav1.IPAddressClaimFinalizer)) @@ -498,8 +500,8 @@ func TestRetrievingVCenterCredentialsFromCluster(t *testing.T) { vm := &infrav1.VSphereVM{} vmKey := util.ObjectKey(vsphereVM) g.Expect(r.Client.Get(context.Background(), vmKey, vm)).NotTo(HaveOccurred()) - g.Expect(conditions.Has(vm, infrav1.VCenterAvailableCondition)).To(BeTrue()) - vCenterCondition := conditions.Get(vm, infrav1.VCenterAvailableCondition) + g.Expect(deprecatedconditions.Has(vm, infrav1.VCenterAvailableCondition)).To(BeTrue()) + vCenterCondition := deprecatedconditions.Get(vm, infrav1.VCenterAvailableCondition) g.Expect(vCenterCondition.Status).To(Equal(corev1.ConditionTrue)) }, ) diff --git a/controllers/vspherevm_ipaddress_reconciler.go b/controllers/vspherevm_ipaddress_reconciler.go index 3f3db49fa2..b591960a5d 100644 --- a/controllers/vspherevm_ipaddress_reconciler.go +++ b/controllers/vspherevm_ipaddress_reconciler.go @@ -26,11 +26,12 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" kerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/klog/v2" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - ipamv1 "sigs.k8s.io/cluster-api/exp/ipam/api/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" + ipamv1beta1 "sigs.k8s.io/cluster-api/api/ipam/v1beta1" clusterutilv1 "sigs.k8s.io/cluster-api/util" - "sigs.k8s.io/cluster-api/util/conditions" - v1beta2conditions "sigs.k8s.io/cluster-api/util/conditions/v1beta2" + deprecatedconditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" + deprecatedv1beta2conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions/v1beta2" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" ctrlutil "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" @@ -51,8 +52,8 @@ func (r vmReconciler) reconcileIPAddressClaims(ctx context.Context, vmCtx *capvc log := ctrl.LoggerFrom(ctx) var ( - claims []conditions.Getter - v1beta2Claims []v1beta2conditions.Getter + claims []deprecatedconditions.Getter + v1beta2Claims []deprecatedv1beta2conditions.Getter errList []error ) @@ -60,7 +61,7 @@ func (r vmReconciler) reconcileIPAddressClaims(ctx context.Context, vmCtx *capvc for poolRefIdx, poolRef := range device.AddressesFromPools { 
totalClaims++ ipAddrClaimName := util.IPAddressClaimName(vmCtx.VSphereVM.Name, devIdx, poolRefIdx) - ipAddrClaim := &ipamv1.IPAddressClaim{} + ipAddrClaim := &ipamv1beta1.IPAddressClaim{} ipAddrClaimKey := client.ObjectKey{ Namespace: vmCtx.VSphereVM.Namespace, Name: ipAddrClaimName, @@ -88,7 +89,7 @@ func (r vmReconciler) reconcileIPAddressClaims(ctx context.Context, vmCtx *capvc // Since this is eventually used to calculate the status of the // IPAddressClaimed condition for the VSphereVM object. - if conditions.Has(ipAddrClaim, clusterv1.ReadyCondition) { + if deprecatedconditions.Has(ipAddrClaim, clusterv1beta1.ReadyCondition) { claims = append(claims, ipAddrClaim) v1beta2Claims = append(v1beta2Claims, ipAddrClaim) } @@ -97,12 +98,13 @@ func (r vmReconciler) reconcileIPAddressClaims(ctx context.Context, vmCtx *capvc if len(errList) > 0 { aggregatedErr := kerrors.NewAggregate(errList) - conditions.MarkFalse(vmCtx.VSphereVM, + deprecatedconditions.MarkFalse(vmCtx.VSphereVM, infrav1.IPAddressClaimedCondition, infrav1.IPAddressClaimNotFoundReason, - clusterv1.ConditionSeverityError, - aggregatedErr.Error()) - v1beta2conditions.Set(vmCtx.VSphereVM, metav1.Condition{ + clusterv1beta1.ConditionSeverityError, + "%v", + aggregatedErr) + deprecatedv1beta2conditions.Set(vmCtx.VSphereVM, metav1.Condition{ Type: infrav1.VSphereVMIPAddressClaimsFulfilledV1Beta2Condition, Status: metav1.ConditionFalse, Reason: infrav1.VSphereVMIPAddressClaimsNotFulfilledV1Beta2Reason, @@ -116,18 +118,18 @@ func (r vmReconciler) reconcileIPAddressClaims(ctx context.Context, vmCtx *capvc // To correctly calculate the status of the condition, we would want all the IPAddressClaim objects // to report the Ready Condition. if len(claims) == totalClaims { - conditions.SetAggregate(vmCtx.VSphereVM, + deprecatedconditions.SetAggregate(vmCtx.VSphereVM, infrav1.IPAddressClaimedCondition, claims, - conditions.AddSourceRef(), - conditions.WithStepCounter()) + deprecatedconditions.AddSourceRef(), + deprecatedconditions.WithStepCounter()) if len(v1beta2Claims) > 0 { - if err := v1beta2conditions.SetAggregateCondition(v1beta2Claims, vmCtx.VSphereVM, clusterv1.ReadyV1Beta2Condition, v1beta2conditions.TargetConditionType(infrav1.VSphereVMIPAddressClaimsFulfilledV1Beta2Condition)); err != nil { + if err := deprecatedv1beta2conditions.SetAggregateCondition(v1beta2Claims, vmCtx.VSphereVM, clusterv1beta1.ReadyV1Beta2Condition, deprecatedv1beta2conditions.TargetConditionType(infrav1.VSphereVMIPAddressClaimsFulfilledV1Beta2Condition)); err != nil { return errors.Wrap(err, "failed to aggregate Ready condition from IPAddressClaims") } } else { - v1beta2conditions.Set(vmCtx.VSphereVM, metav1.Condition{ + deprecatedv1beta2conditions.Set(vmCtx.VSphereVM, metav1.Condition{ Type: infrav1.VSphereVMIPAddressClaimsFulfilledV1Beta2Condition, Status: metav1.ConditionTrue, Reason: infrav1.VSphereVMIPAddressClaimsNotFulfilledV1Beta2Reason, @@ -139,27 +141,27 @@ func (r vmReconciler) reconcileIPAddressClaims(ctx context.Context, vmCtx *capvc // Fallback logic to calculate the state of the IPAddressClaimed condition switch { case totalClaims == claimsFulfilled: - conditions.MarkTrue(vmCtx.VSphereVM, infrav1.IPAddressClaimedCondition) - v1beta2conditions.Set(vmCtx.VSphereVM, metav1.Condition{ + deprecatedconditions.MarkTrue(vmCtx.VSphereVM, infrav1.IPAddressClaimedCondition) + deprecatedv1beta2conditions.Set(vmCtx.VSphereVM, metav1.Condition{ Type: infrav1.VSphereVMIPAddressClaimsFulfilledV1Beta2Condition, Status: metav1.ConditionTrue, Reason: 
infrav1.VSphereVMIPAddressClaimsFulfilledV1Beta2Reason, }) case claimsFulfilled < totalClaims && claimsCreated > 0: - conditions.MarkFalse(vmCtx.VSphereVM, infrav1.IPAddressClaimedCondition, - infrav1.IPAddressClaimsBeingCreatedReason, clusterv1.ConditionSeverityInfo, + deprecatedconditions.MarkFalse(vmCtx.VSphereVM, infrav1.IPAddressClaimedCondition, + infrav1.IPAddressClaimsBeingCreatedReason, clusterv1beta1.ConditionSeverityInfo, "%d/%d claims being created", claimsCreated, totalClaims) - v1beta2conditions.Set(vmCtx.VSphereVM, metav1.Condition{ + deprecatedv1beta2conditions.Set(vmCtx.VSphereVM, metav1.Condition{ Type: infrav1.VSphereVMIPAddressClaimsFulfilledV1Beta2Condition, Status: metav1.ConditionFalse, Reason: infrav1.VSphereVMIPAddressClaimsBeingCreatedV1Beta2Reason, Message: fmt.Sprintf("%d/%d claims being created", claimsCreated, totalClaims), }) case claimsFulfilled < totalClaims && claimsCreated == 0: - conditions.MarkFalse(vmCtx.VSphereVM, infrav1.IPAddressClaimedCondition, - infrav1.WaitingForIPAddressReason, clusterv1.ConditionSeverityInfo, + deprecatedconditions.MarkFalse(vmCtx.VSphereVM, infrav1.IPAddressClaimedCondition, + infrav1.WaitingForIPAddressReason, clusterv1beta1.ConditionSeverityInfo, "%d/%d claims being processed", totalClaims-claimsFulfilled, totalClaims) - v1beta2conditions.Set(vmCtx.VSphereVM, metav1.Condition{ + deprecatedv1beta2conditions.Set(vmCtx.VSphereVM, metav1.Condition{ Type: infrav1.VSphereVMIPAddressClaimsFulfilledV1Beta2Condition, Status: metav1.ConditionFalse, Reason: infrav1.VSphereVMIPAddressClaimsWaitingForIPAddressV1Beta2Reason, @@ -173,8 +175,8 @@ func (r vmReconciler) reconcileIPAddressClaims(ctx context.Context, vmCtx *capvc // from an externally managed IPPool. Ensures that the claim has a reference to the cluster of the VM to // support pausing reconciliation. // The responsibility of the IP address resolution is handled by an external IPAM provider. 
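// --- Illustrative sketch, not part of the patch: when not every IPAddressClaim exposes a
// Ready condition yet, the fallback switch above derives the IPAddressClaimedCondition
// reason purely from the claim counters. The helper below restates that decision table;
// package and function names are hypothetical, the reasons are the ones used above.
package example

import infrav1 "sigs.k8s.io/cluster-api-provider-vsphere/apis/v1beta1"

// ipAddressClaimedReason returns the v1beta1 reason to report while claims are outstanding;
// an empty string means every claim is fulfilled and the condition can be marked True.
func ipAddressClaimedReason(totalClaims, claimsFulfilled, claimsCreated int) string {
	switch {
	case claimsFulfilled == totalClaims:
		return "" // all claims fulfilled
	case claimsCreated > 0:
		return infrav1.IPAddressClaimsBeingCreatedReason // some claims were created in this reconcile
	default:
		return infrav1.WaitingForIPAddressReason // claims exist, the IPAM provider has not allocated addresses yet
	}
}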
-func createOrPatchIPAddressClaim(ctx context.Context, vmCtx *capvcontext.VMContext, name string, poolRef corev1.TypedLocalObjectReference) (*ipamv1.IPAddressClaim, bool, error) { - claim := &ipamv1.IPAddressClaim{ +func createOrPatchIPAddressClaim(ctx context.Context, vmCtx *capvcontext.VMContext, name string, poolRef corev1.TypedLocalObjectReference) (*ipamv1beta1.IPAddressClaim, bool, error) { + claim := &ipamv1beta1.IPAddressClaim{ ObjectMeta: metav1.ObjectMeta{ Name: name, Namespace: vmCtx.VSphereVM.Namespace, @@ -227,7 +229,7 @@ func (r vmReconciler) deleteIPAddressClaims(ctx context.Context, vmCtx *capvcont for devIdx, device := range vmCtx.VSphereVM.Spec.Network.Devices { for poolRefIdx := range device.AddressesFromPools { // check if claim exists - ipAddrClaim := &ipamv1.IPAddressClaim{} + ipAddrClaim := &ipamv1beta1.IPAddressClaim{} ipAddrClaimName := util.IPAddressClaimName(vmCtx.VSphereVM.Name, devIdx, poolRefIdx) ipAddrClaimKey := client.ObjectKey{ Namespace: vmCtx.VSphereVM.Namespace, diff --git a/controllers/vspherevm_ipaddress_reconciler_test.go b/controllers/vspherevm_ipaddress_reconciler_test.go index 936f119c45..55312ee0fe 100644 --- a/controllers/vspherevm_ipaddress_reconciler_test.go +++ b/controllers/vspherevm_ipaddress_reconciler_test.go @@ -24,9 +24,10 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/utils/ptr" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - ipamv1 "sigs.k8s.io/cluster-api/exp/ipam/api/v1beta1" - "sigs.k8s.io/cluster-api/util/conditions" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" + ipamv1beta1 "sigs.k8s.io/cluster-api/api/ipam/v1beta1" + deprecatedconditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" "sigs.k8s.io/controller-runtime/pkg/client" ctrlutil "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" @@ -81,7 +82,7 @@ func Test_vmReconciler_reconcileIPAddressClaims(t *testing.T) { err := vmReconciler{}.reconcileIPAddressClaims(ctx, testCtx) g.Expect(err).ToNot(gomega.HaveOccurred()) - ipAddrClaimList := &ipamv1.IPAddressClaimList{} + ipAddrClaimList := &ipamv1beta1.IPAddressClaimList{} g.Expect(testCtx.Client.List(ctx, ipAddrClaimList)).To(gomega.Succeed()) g.Expect(ipAddrClaimList.Items).To(gomega.HaveLen(3)) @@ -95,21 +96,21 @@ func Test_vmReconciler_reconcileIPAddressClaims(t *testing.T) { g.Expect(claim.Labels).To(gomega.HaveKeyWithValue(clusterv1.ClusterNameLabel, "my-cluster")) } - claimedCondition := conditions.Get(testCtx.VSphereVM, infrav1.IPAddressClaimedCondition) + claimedCondition := deprecatedconditions.Get(testCtx.VSphereVM, infrav1.IPAddressClaimedCondition) g.Expect(claimedCondition).NotTo(gomega.BeNil()) g.Expect(claimedCondition.Status).To(gomega.Equal(corev1.ConditionFalse)) g.Expect(claimedCondition.Reason).To(gomega.Equal(infrav1.IPAddressClaimsBeingCreatedReason)) g.Expect(claimedCondition.Message).To(gomega.Equal("3/3 claims being created")) }) - ipAddrClaim := func(name, poolName string) *ipamv1.IPAddressClaim { - return &ipamv1.IPAddressClaim{ + ipAddrClaim := func(name, poolName string) *ipamv1beta1.IPAddressClaim { + return &ipamv1beta1.IPAddressClaim{ ObjectMeta: metav1.ObjectMeta{ Name: name, Namespace: namespace, }, - Spec: ipamv1.IPAddressClaimSpec{PoolRef: poolRef(poolName)}, - Status: ipamv1.IPAddressClaimStatus{}, + Spec: ipamv1beta1.IPAddressClaimSpec{PoolRef: poolRef(poolName)}, + Status: ipamv1beta1.IPAddressClaimStatus{}, } } @@ -124,13 +125,13 @@ func 
Test_vmReconciler_reconcileIPAddressClaims(t *testing.T) { err := vmReconciler{}.reconcileIPAddressClaims(ctx, testCtx) g.Expect(err).ToNot(gomega.HaveOccurred()) - claimedCondition := conditions.Get(testCtx.VSphereVM, infrav1.IPAddressClaimedCondition) + claimedCondition := deprecatedconditions.Get(testCtx.VSphereVM, infrav1.IPAddressClaimedCondition) g.Expect(claimedCondition).NotTo(gomega.BeNil()) g.Expect(claimedCondition.Status).To(gomega.Equal(corev1.ConditionFalse)) g.Expect(claimedCondition.Reason).To(gomega.Equal(infrav1.WaitingForIPAddressReason)) g.Expect(claimedCondition.Message).To(gomega.Equal("3/3 claims being processed")) - ipAddrClaimList := &ipamv1.IPAddressClaimList{} + ipAddrClaimList := &ipamv1beta1.IPAddressClaimList{} g.Expect(testCtx.Client.List(ctx, ipAddrClaimList)).To(gomega.Succeed()) for idx := range ipAddrClaimList.Items { @@ -160,11 +161,11 @@ func Test_vmReconciler_reconcileIPAddressClaims(t *testing.T) { err := vmReconciler{}.reconcileIPAddressClaims(ctx, testCtx) g.Expect(err).ToNot(gomega.HaveOccurred()) - claimedCondition := conditions.Get(testCtx.VSphereVM, infrav1.IPAddressClaimedCondition) + claimedCondition := deprecatedconditions.Get(testCtx.VSphereVM, infrav1.IPAddressClaimedCondition) g.Expect(claimedCondition).NotTo(gomega.BeNil()) g.Expect(claimedCondition.Status).To(gomega.Equal(corev1.ConditionTrue)) - ipAddrClaimList := &ipamv1.IPAddressClaimList{} + ipAddrClaimList := &ipamv1beta1.IPAddressClaimList{} g.Expect(testCtx.Client.List(ctx, ipAddrClaimList)).To(gomega.Succeed()) for idx := range ipAddrClaimList.Items { @@ -182,18 +183,18 @@ func Test_vmReconciler_reconcileIPAddressClaims(t *testing.T) { g := gomega.NewWithT(t) ipAddrClaimWithReadyConditionTrue := ipAddrClaim(util.IPAddressClaimName(name, 0, 0), "my-pool-1") - ipAddrClaimWithReadyConditionTrue.Status.Conditions = clusterv1.Conditions{ - *conditions.TrueCondition(clusterv1.ReadyCondition), + ipAddrClaimWithReadyConditionTrue.Status.Conditions = clusterv1beta1.Conditions{ + *deprecatedconditions.TrueCondition(clusterv1beta1.ReadyCondition), } ipAddrClaimWithReadyConditionFalse := ipAddrClaim(util.IPAddressClaimName(name, 1, 0), "my-pool-2") - ipAddrClaimWithReadyConditionFalse.Status.Conditions = clusterv1.Conditions{ - *conditions.FalseCondition(clusterv1.ReadyCondition, "IPAddressFetchProgress", clusterv1.ConditionSeverityInfo, ""), + ipAddrClaimWithReadyConditionFalse.Status.Conditions = clusterv1beta1.Conditions{ + *deprecatedconditions.FalseCondition(clusterv1beta1.ReadyCondition, "IPAddressFetchProgress", clusterv1beta1.ConditionSeverityInfo, ""), } secondIPAddrClaimWithReadyConditionTrue := ipAddrClaim(util.IPAddressClaimName(name, 1, 1), "my-pool-3") - secondIPAddrClaimWithReadyConditionTrue.Status.Conditions = clusterv1.Conditions{ - *conditions.TrueCondition(clusterv1.ReadyCondition), + secondIPAddrClaimWithReadyConditionTrue.Status.Conditions = clusterv1beta1.Conditions{ + *deprecatedconditions.TrueCondition(clusterv1beta1.ReadyCondition), } testCtx := setup(vsphereVM, @@ -204,7 +205,7 @@ func Test_vmReconciler_reconcileIPAddressClaims(t *testing.T) { err := vmReconciler{}.reconcileIPAddressClaims(ctx, testCtx) g.Expect(err).ToNot(gomega.HaveOccurred()) - claimedCondition := conditions.Get(testCtx.VSphereVM, infrav1.IPAddressClaimedCondition) + claimedCondition := deprecatedconditions.Get(testCtx.VSphereVM, infrav1.IPAddressClaimedCondition) g.Expect(claimedCondition).NotTo(gomega.BeNil()) g.Expect(claimedCondition.Status).To(gomega.Equal(corev1.ConditionFalse)) }) @@ 
-213,14 +214,14 @@ func Test_vmReconciler_reconcileIPAddressClaims(t *testing.T) { g := gomega.NewWithT(t) ipAddrClaimWithReadyConditionTrue := ipAddrClaim(util.IPAddressClaimName(name, 0, 0), "my-pool-1") - ipAddrClaimWithReadyConditionTrue.Status.Conditions = clusterv1.Conditions{ - *conditions.TrueCondition(clusterv1.ReadyCondition), + ipAddrClaimWithReadyConditionTrue.Status.Conditions = clusterv1beta1.Conditions{ + *deprecatedconditions.TrueCondition(clusterv1beta1.ReadyCondition), } ipAddrClaimWithReadyConditionTrue.Status.AddressRef.Name = "blah-one" ipAddrClaimWithReadyConditionFalse := ipAddrClaim(util.IPAddressClaimName(name, 1, 0), "my-pool-2") - ipAddrClaimWithReadyConditionFalse.Status.Conditions = clusterv1.Conditions{ - *conditions.FalseCondition(clusterv1.ReadyCondition, "IPAddressFetchProgress", clusterv1.ConditionSeverityInfo, ""), + ipAddrClaimWithReadyConditionFalse.Status.Conditions = clusterv1beta1.Conditions{ + *deprecatedconditions.FalseCondition(clusterv1beta1.ReadyCondition, "IPAddressFetchProgress", clusterv1beta1.ConditionSeverityInfo, ""), } iPAddrClaimWithNoReadyCondition := ipAddrClaim(util.IPAddressClaimName(name, 1, 1), "my-pool-3") @@ -233,7 +234,7 @@ func Test_vmReconciler_reconcileIPAddressClaims(t *testing.T) { err := vmReconciler{}.reconcileIPAddressClaims(ctx, testCtx) g.Expect(err).ToNot(gomega.HaveOccurred()) - claimedCondition := conditions.Get(testCtx.VSphereVM, infrav1.IPAddressClaimedCondition) + claimedCondition := deprecatedconditions.Get(testCtx.VSphereVM, infrav1.IPAddressClaimedCondition) g.Expect(claimedCondition).NotTo(gomega.BeNil()) g.Expect(claimedCondition.Status).To(gomega.Equal(corev1.ConditionFalse)) g.Expect(claimedCondition.Reason).To(gomega.Equal(infrav1.WaitingForIPAddressReason)) diff --git a/go.mod b/go.mod index 63a8e71878..ccf9d877eb 100644 --- a/go.mod +++ b/go.mod @@ -1,8 +1,8 @@ module sigs.k8s.io/cluster-api-provider-vsphere -go 1.23.0 +go 1.24.0 -replace sigs.k8s.io/cluster-api => sigs.k8s.io/cluster-api v1.10.1 +replace sigs.k8s.io/cluster-api => sigs.k8s.io/cluster-api v1.10.0-beta.0.0.20250527052821-4642e1497262 replace github.com/vmware-tanzu/vm-operator/pkg/constants/testlabels => github.com/vmware-tanzu/vm-operator/pkg/constants/testlabels v0.0.0-20240404200847-de75746a9505 @@ -19,7 +19,6 @@ require ( github.com/blang/semver/v4 v4.0.0 github.com/go-logr/logr v1.4.2 github.com/go-task/slim-sprig/v3 v3.0.0 - github.com/google/gofuzz v1.2.0 github.com/google/uuid v1.6.0 github.com/onsi/ginkgo/v2 v2.23.4 github.com/onsi/gomega v1.37.0 @@ -32,24 +31,24 @@ require ( golang.org/x/tools v0.33.0 gopkg.in/gcfg.v1 v1.2.3 gopkg.in/yaml.v2 v2.4.0 - k8s.io/api v0.32.4 - k8s.io/apiextensions-apiserver v0.32.4 - k8s.io/apimachinery v0.32.4 - k8s.io/client-go v0.32.4 - k8s.io/cluster-bootstrap v0.32.4 - k8s.io/component-base v0.32.4 + k8s.io/api v0.33.1 + k8s.io/apiextensions-apiserver v0.33.1 + k8s.io/apimachinery v0.33.1 + k8s.io/client-go v0.33.1 + k8s.io/cluster-bootstrap v0.33.1 + k8s.io/component-base v0.33.1 k8s.io/klog/v2 v2.130.1 k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 sigs.k8s.io/cluster-api v0.0.0-00010101000000-000000000000 - sigs.k8s.io/controller-runtime v0.20.4 + sigs.k8s.io/controller-runtime v0.21.0 + sigs.k8s.io/randfill v1.0.0 sigs.k8s.io/yaml v1.4.0 ) require ( - cel.dev/expr v0.18.0 // indirect + cel.dev/expr v0.19.1 // indirect github.com/NYTimes/gziphandler v1.1.1 // indirect github.com/antlr4-go/antlr/v4 v4.13.0 // indirect - github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a // 
indirect github.com/cenkalti/backoff/v4 v4.3.0 // indirect github.com/emicklei/go-restful/v3 v3.12.2 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect @@ -57,36 +56,38 @@ require ( github.com/go-logr/stdr v1.2.2 // indirect github.com/go-logr/zapr v1.3.0 // indirect github.com/google/btree v1.1.3 // indirect - github.com/google/gnostic-models v0.6.8 // indirect + github.com/google/gnostic-models v0.6.9 // indirect github.com/google/pprof v0.0.0-20250403155104-27863c87afa6 // indirect - github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0 // indirect + github.com/kylelemons/godebug v1.1.0 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/stretchr/objx v0.5.2 // indirect github.com/x448/float16 v0.8.4 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 // indirect - go.opentelemetry.io/otel v1.29.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0 // indirect - go.opentelemetry.io/otel/metric v1.29.0 // indirect - go.opentelemetry.io/otel/sdk v1.29.0 // indirect - go.opentelemetry.io/otel/trace v1.29.0 // indirect - go.opentelemetry.io/proto/otlp v1.3.1 // indirect + go.opentelemetry.io/auto/sdk v1.1.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 // indirect + go.opentelemetry.io/otel v1.33.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.33.0 // indirect + go.opentelemetry.io/otel/metric v1.33.0 // indirect + go.opentelemetry.io/otel/sdk v1.33.0 // indirect + go.opentelemetry.io/otel/trace v1.33.0 // indirect + go.opentelemetry.io/proto/otlp v1.4.0 // indirect go.uber.org/automaxprocs v1.6.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect golang.org/x/net v0.40.0 // indirect - golang.org/x/oauth2 v0.28.0 // indirect + golang.org/x/oauth2 v0.30.0 // indirect golang.org/x/sync v0.14.0 // indirect golang.org/x/sys v0.33.0 // indirect golang.org/x/term v0.32.0 // indirect golang.org/x/text v0.25.0 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20241223144023-3abc09e42ca8 // indirect - google.golang.org/grpc v1.67.3 // indirect + google.golang.org/grpc v1.68.2 // indirect gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/warnings.v0 v0.1.2 // indirect - sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.0 // indirect + sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.2 // indirect ) require ( @@ -102,8 +103,7 @@ require ( github.com/go-openapi/swag v0.23.0 // indirect github.com/gobuffalo/flect v1.0.3 // indirect github.com/gogo/protobuf v1.3.2 // indirect - github.com/golang/protobuf v1.5.4 // indirect - github.com/google/cel-go v0.22.0 // indirect + github.com/google/cel-go v0.23.2 // indirect github.com/google/go-cmp v0.7.0 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/josharian/intern v1.0.0 // indirect @@ -111,20 +111,20 @@ require ( github.com/mailru/easyjson v0.7.7 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect - 
github.com/prometheus/client_golang v1.19.1 // indirect + github.com/prometheus/client_golang v1.22.0 // indirect github.com/prometheus/client_model v0.6.1 // indirect - github.com/prometheus/common v0.55.0 // indirect + github.com/prometheus/common v0.62.0 // indirect github.com/prometheus/procfs v0.15.1 // indirect github.com/spf13/pflag v1.0.6 github.com/stoewer/go-strcase v1.3.0 // indirect - golang.org/x/time v0.8.0 // indirect + golang.org/x/time v0.9.0 // indirect gomodules.xyz/jsonpatch/v2 v2.5.0 // indirect google.golang.org/protobuf v1.36.5 // indirect gopkg.in/fsnotify.v1 v1.4.7 gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/apiserver v0.32.4 // indirect - k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f // indirect + k8s.io/apiserver v0.33.1 // indirect + k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff // indirect sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect - sigs.k8s.io/structured-merge-diff/v4 v4.4.2 // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.6.0 // indirect ) diff --git a/go.sum b/go.sum index 6edf8afa6a..b2c73757b6 100644 --- a/go.sum +++ b/go.sum @@ -1,5 +1,5 @@ -cel.dev/expr v0.18.0 h1:CJ6drgk+Hf96lkLikr4rFf19WrU0BOWEihyZnI2TAzo= -cel.dev/expr v0.18.0/go.mod h1:MrpN08Q+lEBs+bGYdLxxHkZoUSsCp0nSKTs0nTymJgw= +cel.dev/expr v0.19.1 h1:NciYrtDRIR0lNCnH1LFJegdjspNx9fI59O7TWcua/W4= +cel.dev/expr v0.19.1/go.mod h1:MrpN08Q+lEBs+bGYdLxxHkZoUSsCp0nSKTs0nTymJgw= dario.cat/mergo v1.0.1 h1:Ra4+bf83h2ztPIQYNP99R6m+Y7KfnARDfID+a+vLl4s= dario.cat/mergo v1.0.1/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= github.com/MakeNowJust/heredoc v1.0.0 h1:cXCdzVdstXyiTqTvfqk9SDHpKNjxuom+DOlyEeQ4pzQ= @@ -17,8 +17,6 @@ github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbt github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/antlr4-go/antlr/v4 v4.13.0 h1:lxCg3LAv+EUK6t1i0y1V6/SLeUi0eKEKdhQAlS8TVTI= github.com/antlr4-go/antlr/v4 v4.13.0/go.mod h1:pfChB/xh/Unjila75QW7+VU4TSnWnnk9UTnmpPaOR2g= -github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a h1:idn718Q4B6AGu/h5Sxe66HYVdqdGu2l9Iebqhi/AEoA= -github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ= @@ -33,11 +31,6 @@ github.com/coredns/caddy v1.1.1 h1:2eYKZT7i6yxIfGP3qLJoJ7HAsDJqYB+X68g4NYjSrE0= github.com/coredns/caddy v1.1.1/go.mod h1:A6ntJQlAWuQfFlsd9hvigKbo2WS0VUs2l1e2F+BawD4= github.com/coredns/corefile-migration v1.0.26 h1:xiiEkVB1Dwolb24pkeDUDBfygV9/XsOSq79yFCrhptY= github.com/coredns/corefile-migration v1.0.26/go.mod h1:56DPqONc3njpVPsdilEnfijCwNGC3/kTJLl7i7SPavY= -github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4= -github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec= -github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf h1:iW4rZ826su+pqaw19uhpSCzhj44qo35pNgKFGqzDKkU= -github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= -github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= 
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v0.0.0-20151105211317-5215b55f46b2/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -50,8 +43,6 @@ github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5 github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= github.com/dougm/pretty v0.0.0-20160325215624-add1dbc86daf h1:A2XbJkAuMMFy/9EftoubSKBUIyiOm6Z8+X5G7QpS6so= github.com/dougm/pretty v0.0.0-20160325215624-add1dbc86daf/go.mod h1:7NQ3kWOx2cZOSjtcveTa5nqupVr2s6/83sG+rTlI7uA= -github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= -github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= github.com/emicklei/go-restful/v3 v3.12.2 h1:DhwDP0vY3k8ZzE0RunuJy8GhNpPL6zqLkDf9B/a0/xU= @@ -96,8 +87,6 @@ github.com/gobuffalo/flect v1.0.3/go.mod h1:A5msMlrHtLqh9umBSnvabjsMrCcCpAyzglnD github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= -github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -106,10 +95,10 @@ github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg= github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= -github.com/google/cel-go v0.22.0 h1:b3FJZxpiv1vTMo2/5RDUqAHPxkT8mmMfJIrq1llbf7g= -github.com/google/cel-go v0.22.0/go.mod h1:BuznPXXfQDpXKWQ9sPW3TzlAJN5zzFe+i9tIs0yC4s8= -github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= -github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= +github.com/google/cel-go v0.23.2 h1:UdEe3CvQh3Nv+E/j9r1Y//WO0K0cSyD7/y0bzyLIMI4= +github.com/google/cel-go v0.23.2/go.mod h1:52Pb6QsDbC5kvgxvZhiL9QX1oZEkcUF/ZqaPx1J5Wwo= +github.com/google/gnostic-models v0.6.9 h1:MU/8wDLif2qCXZmzncUQ/BOfxWfthHi63KqpoNbWqVw= +github.com/google/gnostic-models v0.6.9/go.mod h1:CiWsm0s6BSQd1hRn8/QmxqB6BesYcbSZxsz9b0KuDBw= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= @@ -124,24 +113,14 @@ github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+ github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= 
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= -github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg= -github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= -github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw= -github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y= -github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho= -github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= -github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= -github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 h1:bkypFPDjIYGfCYD5mRBvpqxfYX1YCS1PXdKYWi8FsN0= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0/go.mod h1:P+Lt/0by1T8bfcF3z737NnSbmxQAppXMRziHUxPOC8k= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0 h1:TmHmbvxPmaegwhDubVz0lICL0J5Ka2vwTzhoePEXsGE= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0/go.mod h1:qztMSjm835F2bXf+5HKAPIS5qsmQDqZna/PgVt4rWtI= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/huandu/xstrings v1.5.0 h1:2ag3IFq9ZDANvthTwTiqSSZLjDc+BedvHPAp5tJy2TI= github.com/huandu/xstrings v1.5.0/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= -github.com/jonboulle/clockwork v0.4.0 h1:p4Cf1aMWXnXAUh8lVfewRBx1zaTSYKrKMF2g3ST4RZ4= -github.com/jonboulle/clockwork v0.4.0/go.mod h1:xgRqUGwRcjKCO1vbZUEtSLrqKoPSsUpK7fnezOII0kc= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/json-iterator/go v0.0.0-20180612202835-f2b4162afba3/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= @@ -151,6 +130,8 @@ github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHm github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= +github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= @@ -159,6 +140,8 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= 
+github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= @@ -197,23 +180,19 @@ github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRI github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g= github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U= -github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE= -github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho= +github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q= +github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0= github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= -github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc= -github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8= +github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io= +github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I= github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= -github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= -github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= +github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= +github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/shopspring/decimal v1.4.0 h1:bxl37RwXBklmTi0C79JfXCEBD1cqqHt0bbgBAGFp81k= github.com/shopspring/decimal v1.4.0/go.mod h1:gawqmDU56v4yIKSwfBSFip1HdCCXN8/+DMd9qYNcwME= -github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= -github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= -github.com/soheilhy/cmux v0.1.5 h1:jjzc5WVemNEDTLwv9tlmemhC73tI08BNOIGwBOo10Js= -github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0= github.com/spf13/cast v1.7.1 h1:cuNEagBQEHWN1FnbGEjCXL2szYEXqfJPbP2HNUaca9Y= github.com/spf13/cast v1.7.1/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= github.com/spf13/cobra v1.9.1 h1:CXSaggrXdbHK9CF+8ywj8Amf7PBRmPCOJugH954Nnlo= @@ -237,8 +216,6 @@ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= github.com/stretchr/testify v1.10.0/go.mod 
h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= -github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75 h1:6fotK7otjonDflCTK0BCfls4SPy3NcCVb5dqqmbRknE= -github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75/go.mod h1:KO6IkyS8Y3j8OdNO85qEYBsRPuteD+YciPomcXdrMnk= github.com/vmware-tanzu/net-operator-api v0.0.0-20240326163340-1f32d6bf7f9d h1:cgx9UH/r53bKU/Gbv8IPsUZ34bj5+ItijA2JCUS3kVk= github.com/vmware-tanzu/net-operator-api v0.0.0-20240326163340-1f32d6bf7f9d/go.mod h1:JbFOh22iDsT5BowJe0GgpMI5e2/S7cWaJlv9LdURVQM= github.com/vmware-tanzu/nsx-operator/pkg/apis v0.0.0-20241112044858-9da8637c1b0d h1:z9lrzKVtNlujduv9BilzPxuge/LE2F0N1ms3TP4JZvw= @@ -251,44 +228,26 @@ github.com/vmware/govmomi v0.50.0 h1:vFOnUCBCX3m3MgTKfBp68Pz5gsHvKkO07Y2wCGYYQOM github.com/vmware/govmomi v0.50.0/go.mod h1:Z5uo7z0kRhVV00E4gfbUGwUaXIKTgqngsT+t/mIDpcI= github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= -github.com/xiang90/probing v0.0.0-20221125231312-a49e3df8f510 h1:S2dVYn90KE98chqDkyE9Z4N61UnQd+KOfgp5Iu53llk= -github.com/xiang90/probing v0.0.0-20221125231312-a49e3df8f510/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -go.etcd.io/bbolt v1.3.11 h1:yGEzV1wPz2yVCLsD8ZAiGHhHVlczyC9d1rP43/VCRJ0= -go.etcd.io/bbolt v1.3.11/go.mod h1:dksAq7YMXoljX0xu6VF5DMZGbhYYoLUalEiSySYAS4I= -go.etcd.io/etcd/api/v3 v3.5.20 h1:aKfz3nPZECWoZJXMSH9y6h2adXjtOHaHTGEVCuCmaz0= -go.etcd.io/etcd/api/v3 v3.5.20/go.mod h1:QqKGViq4KTgOG43dr/uH0vmGWIaoJY3ggFi6ZH0TH/U= -go.etcd.io/etcd/client/pkg/v3 v3.5.20 h1:sZIAtra+xCo56gdf6BR62to/hiie5Bwl7hQIqMzVTEM= -go.etcd.io/etcd/client/pkg/v3 v3.5.20/go.mod h1:qaOi1k4ZA9lVLejXNvyPABrVEe7VymMF2433yyRQ7O0= -go.etcd.io/etcd/client/v2 v2.305.16 h1:kQrn9o5czVNaukf2A2At43cE9ZtWauOtf9vRZuiKXow= -go.etcd.io/etcd/client/v2 v2.305.16/go.mod h1:h9YxWCzcdvZENbfzBTFCnoNumr2ax3F19sKMqHFmXHE= -go.etcd.io/etcd/client/v3 v3.5.20 h1:jMT2MwQEhyvhQg49Cec+1ZHJzfUf6ZgcmV0GjPv0tIQ= -go.etcd.io/etcd/client/v3 v3.5.20/go.mod h1:J5lbzYRMUR20YolS5UjlqqMcu3/wdEvG5VNBhzyo3m0= -go.etcd.io/etcd/pkg/v3 v3.5.16 h1:cnavs5WSPWeK4TYwPYfmcr3Joz9BH+TZ6qoUtz6/+mc= -go.etcd.io/etcd/pkg/v3 v3.5.16/go.mod h1:+lutCZHG5MBBFI/U4eYT5yL7sJfnexsoM20Y0t2uNuY= -go.etcd.io/etcd/raft/v3 v3.5.16 h1:zBXA3ZUpYs1AwiLGPafYAKKl/CORn/uaxYDwlNwndAk= -go.etcd.io/etcd/raft/v3 v3.5.16/go.mod h1:P4UP14AxofMJ/54boWilabqqWoW9eLodl6I5GdGzazI= -go.etcd.io/etcd/server/v3 v3.5.16 h1:d0/SAdJ3vVsZvF8IFVb1k8zqMZ+heGcNfft71ul9GWE= -go.etcd.io/etcd/server/v3 v3.5.16/go.mod h1:ynhyZZpdDp1Gq49jkUg5mfkDWZwXnn3eIqCqtJnrD/s= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0 h1:9G6E0TXzGFVfTnawRzrPl83iHOAV7L8NJiR8RSGYV1g= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0/go.mod h1:azvtTADFQJA8mX80jIH/akaE7h+dbm/sVuaHqN13w74= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 h1:TT4fX+nBOA/+LUkobKGW1ydGcn+G3vRw9+g5HwCphpk= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0/go.mod h1:L7UH0GbB0p47T4Rri3uHjbpCFYrVrwc1I25QhNPiGK8= -go.opentelemetry.io/otel v1.29.0 h1:PdomN/Al4q/lN6iBJEN3AwPvUiHPMlt93c8bqTG5Llw= -go.opentelemetry.io/otel v1.29.0/go.mod h1:N/WtXPs1CNCUEx+Agz5uouwCba+i+bJGFicT8SR4NP8= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 
h1:3Q/xZUyC1BBkualc9ROb4G8qkH90LXEIICcs5zv1OYY= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0/go.mod h1:s75jGIWA9OfCMzF0xr+ZgfrB5FEbbV7UuYo32ahUiFI= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0 h1:qFffATk0X+HD+f1Z8lswGiOQYKHRlzfmdJm0wEaVrFA= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0/go.mod h1:MOiCmryaYtc+V0Ei+Tx9o5S1ZjA7kzLucuVuyzBZloQ= -go.opentelemetry.io/otel/metric v1.29.0 h1:vPf/HFWTNkPu1aYeIsc98l4ktOQaL6LeSoeV2g+8YLc= -go.opentelemetry.io/otel/metric v1.29.0/go.mod h1:auu/QWieFVWx+DmQOUMgj0F8LHWdgalxXqvp7BII/W8= -go.opentelemetry.io/otel/sdk v1.29.0 h1:vkqKjk7gwhS8VaWb0POZKmIEDimRCMsopNYnriHyryo= -go.opentelemetry.io/otel/sdk v1.29.0/go.mod h1:pM8Dx5WKnvxLCb+8lG1PRNIDxu9g9b9g59Qr7hfAAok= -go.opentelemetry.io/otel/trace v1.29.0 h1:J/8ZNK4XgR7a21DZUAsbF8pZ5Jcw1VhACmnYt39JTi4= -go.opentelemetry.io/otel/trace v1.29.0/go.mod h1:eHl3w0sp3paPkYstJOmAimxhiFXPg+MMTlEh3nsQgWQ= -go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0= -go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8= +go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= +go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 h1:yd02MEjBdJkG3uabWP9apV+OuWRIXGDuJEUJbOHmCFU= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0/go.mod h1:umTcuxiv1n/s/S6/c2AT/g2CQ7u5C59sHDNmfSwgz7Q= +go.opentelemetry.io/otel v1.33.0 h1:/FerN9bax5LoK51X/sI0SVYrjSE0/yUL7DpxW4K3FWw= +go.opentelemetry.io/otel v1.33.0/go.mod h1:SUUkR6csvUQl+yjReHu5uM3EtVV7MBm5FHKRlNx4I8I= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0 h1:Vh5HayB/0HHfOQA7Ctx69E/Y/DcQSMPpKANYVMQ7fBA= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0/go.mod h1:cpgtDBaqD/6ok/UG0jT15/uKjAY8mRA53diogHBg3UI= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.33.0 h1:5pojmb1U1AogINhN3SurB+zm/nIcusopeBNp42f45QM= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.33.0/go.mod h1:57gTHJSE5S1tqg+EKsLPlTWhpHMsWlVmer+LA926XiA= +go.opentelemetry.io/otel/metric v1.33.0 h1:r+JOocAyeRVXD8lZpjdQjzMadVZp2M4WmQ+5WtEnklQ= +go.opentelemetry.io/otel/metric v1.33.0/go.mod h1:L9+Fyctbp6HFTddIxClbQkjtubW6O9QS3Ann/M82u6M= +go.opentelemetry.io/otel/sdk v1.33.0 h1:iax7M131HuAm9QkZotNHEfstof92xM+N8sr3uHXc2IM= +go.opentelemetry.io/otel/sdk v1.33.0/go.mod h1:A1Q5oi7/9XaMlIWzPSxLRWOI8nG3FnzHJNbiENQuihM= +go.opentelemetry.io/otel/trace v1.33.0 h1:cCJuF7LRjUFso9LPnEAHJDB2pqzp+hbO8eu1qqW2d/s= +go.opentelemetry.io/otel/trace v1.33.0/go.mod h1:uIcdVUZMpTAmz0tI1z04GoVSezK37CbGV4fr1f2nBck= +go.opentelemetry.io/proto/otlp v1.4.0 h1:TA9WRvW6zMwP+Ssb6fLoUIuirti1gGbP28GcKG1jgeg= +go.opentelemetry.io/proto/otlp v1.4.0/go.mod h1:PPBWZIP98o2ElSqI35IHfu7hIhSwvc5N38Jw8pXuGFY= go.uber.org/automaxprocs v1.6.0 h1:O3y2/QNTOdbF+e/dpXNNW7Rx2hZ4sTIPyybbxyNqTUs= go.uber.org/automaxprocs v1.6.0/go.mod h1:ifeIMSnPZuznNm6jmdzmU3/bfk01Fe2fotchwEFJ8r8= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= @@ -319,8 +278,8 @@ golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.40.0 h1:79Xs7wF06Gbdcg4kdCCIQArK11Z1hr5POQ6+fIYHNuY= golang.org/x/net v0.40.0/go.mod h1:y0hY0exeL2Pku80/zKK7tpntoX23cqL3Oa6njdgRtds= -golang.org/x/oauth2 
v0.28.0 h1:CrgCKl8PPAVtLnU3c+EDw6x11699EWlsDeWNWKdIOkc= -golang.org/x/oauth2 v0.28.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8= +golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI= +golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -343,8 +302,8 @@ golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.25.0 h1:qVyWApTSYLk/drJRO5mDlNYskwQznZmkpV2c8q9zls4= golang.org/x/text v0.25.0/go.mod h1:WEdwpYrmk1qmdHvhkSTNPm3app7v4rsT8F2UD6+VHIA= -golang.org/x/time v0.8.0 h1:9i3RxcPv3PZnitoVGMPDKZSq1xW1gK1Xy3ArNOGZfEg= -golang.org/x/time v0.8.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/time v0.9.0 h1:EsRrnYcQiGH+5FfbgvV4AP7qEZstoyrHB0DzarOQ4ZY= +golang.org/x/time v0.9.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -359,14 +318,12 @@ golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= gomodules.xyz/jsonpatch/v2 v2.5.0 h1:JELs8RLM12qJGXU4u/TO3V25KW8GreMKl9pdkk14RM0= gomodules.xyz/jsonpatch/v2 v2.5.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY= -google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80 h1:KAeGQVN3M9nD0/bQXnr/ClcEMJ968gUXJQ9pwfSynuQ= -google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80/go.mod h1:cc8bqMqtv9gMOr0zHg2Vzff5ULhhL2IXP4sbcn32Dro= google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576 h1:CkkIfIt50+lT6NHAVoRYEyAvQGFM7xEwXUUywFvEb3Q= google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576/go.mod h1:1R3kvZ1dtP3+4p4d3G8uJ8rFk/fWlScl38vanWACI08= google.golang.org/genproto/googleapis/rpc v0.0.0-20241223144023-3abc09e42ca8 h1:TqExAhdPaB60Ux47Cn0oLV07rGnxZzIsaRhQaqS666A= google.golang.org/genproto/googleapis/rpc v0.0.0-20241223144023-3abc09e42ca8/go.mod h1:lcTa1sDdWEIHMWlITnIczmw5w60CF9ffkb8Z+DVmmjA= -google.golang.org/grpc v1.67.3 h1:OgPcDAFKHnH8X3O4WcO4XUc8GRDeKsKReqbQtiCj7N8= -google.golang.org/grpc v1.67.3/go.mod h1:YGaHCc6Oap+FzBJTZLBzkGSYt/cvGPFTPxkn7QfSU8s= +google.golang.org/grpc v1.68.2 h1:EWN8x60kqfCcBXzbfPpEezgdYRZA9JCxtySmCtTUs2E= +google.golang.org/grpc v1.68.2/go.mod h1:AOXp0/Lj+nW5pJEgw8KQ6L1Ka+NTyJOABlSgfCrCN5A= google.golang.org/protobuf v1.36.5 h1:tPhr+woSbjfYvY6/GPufUoYizxw1cF/yFoxJ2fmpwlM= google.golang.org/protobuf v1.36.5/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -381,8 +338,6 @@ gopkg.in/gcfg.v1 v1.2.3 h1:m8OOJ4ccYHnx2f4gQwpno8nAX5OGOh7RLaaz0pj3Ogs= gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= gopkg.in/inf.v0 v0.9.1 
h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= -gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc= -gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME= gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= @@ -394,42 +349,45 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -k8s.io/api v0.32.4 h1:kw8Y/G8E7EpNy7gjB8gJZl3KJkNz8HM2YHrZPtAZsF4= -k8s.io/api v0.32.4/go.mod h1:5MYFvLvweRhyKylM3Es/6uh/5hGp0dg82vP34KifX4g= -k8s.io/apiextensions-apiserver v0.32.4 h1:IA+CoR63UDOijR/vEpow6wQnX4V6iVpzazJBskHrpHE= -k8s.io/apiextensions-apiserver v0.32.4/go.mod h1:Y06XO/b92H8ymOdG1HlA1submf7gIhbEDc3RjriqZOs= +k8s.io/api v0.33.1 h1:tA6Cf3bHnLIrUK4IqEgb2v++/GYUtqiu9sRVk3iBXyw= +k8s.io/api v0.33.1/go.mod h1:87esjTn9DRSRTD4fWMXamiXxJhpOIREjWOSjsW1kEHw= +k8s.io/apiextensions-apiserver v0.33.1 h1:N7ccbSlRN6I2QBcXevB73PixX2dQNIW0ZRuguEE91zI= +k8s.io/apiextensions-apiserver v0.33.1/go.mod h1:uNQ52z1A1Gu75QSa+pFK5bcXc4hq7lpOXbweZgi4dqA= k8s.io/apimachinery v0.17.4/go.mod h1:gxLnyZcGNdZTCLnq3fgzyg2A5BVCHTNDFrw8AmuJ+0g= -k8s.io/apimachinery v0.32.4 h1:8EEksaxA7nd7xWJkkwLDN4SvWS5ot9g6Z/VZb3ju25I= -k8s.io/apimachinery v0.32.4/go.mod h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE= -k8s.io/apiserver v0.32.4 h1:Yf7sd/y+GOQKH1Qf6wUeayZrYXe2SKZ17Bcq7VQM5HQ= -k8s.io/apiserver v0.32.4/go.mod h1:JFUMNtE2M5yqLZpIsgCb06SkVSW1YcxW1oyLSTfjXR8= -k8s.io/client-go v0.32.4 h1:zaGJS7xoYOYumoWIFXlcVrsiYioRPrXGO7dBfVC5R6M= -k8s.io/client-go v0.32.4/go.mod h1:k0jftcyYnEtwlFW92xC7MTtFv5BNcZBr+zn9jPlT9Ic= -k8s.io/cluster-bootstrap v0.32.4 h1:QO2rZ1KDLHaa3WKgpF3P26/5AKLFViMt0jUJptylCgs= -k8s.io/cluster-bootstrap v0.32.4/go.mod h1:+O5BK2t/VxGXcPPOn+SlpFrC0x78nnW6jnPI2MRhdz8= -k8s.io/component-base v0.32.4 h1:HuF+2JVLbFS5GODLIfPCb1Td6b+G2HszJoArcWOSr5I= -k8s.io/component-base v0.32.4/go.mod h1:10KloJEYw1keU/Xmjfy9TKJqUq7J2mYdiD1VDXoco4o= +k8s.io/apimachinery v0.33.1 h1:mzqXWV8tW9Rw4VeW9rEkqvnxj59k1ezDUl20tFK/oM4= +k8s.io/apimachinery v0.33.1/go.mod h1:BHW0YOu7n22fFv/JkYOEfkUYNRN0fj0BlvMFWA7b+SM= +k8s.io/apiserver v0.33.1 h1:yLgLUPDVC6tHbNcw5uE9mo1T6ELhJj7B0geifra3Qdo= +k8s.io/apiserver v0.33.1/go.mod h1:VMbE4ArWYLO01omz+k8hFjAdYfc3GVAYPrhP2tTKccs= +k8s.io/client-go v0.33.1 h1:ZZV/Ks2g92cyxWkRRnfUDsnhNn28eFpt26aGc8KbXF4= +k8s.io/client-go v0.33.1/go.mod h1:JAsUrl1ArO7uRVFWfcj6kOomSlCv+JpvIsp6usAGefA= +k8s.io/cluster-bootstrap v0.33.1 h1:esGY+qXFJ78myppBzMVqqj37ReGLOJpQNslRiqmQGes= +k8s.io/cluster-bootstrap v0.33.1/go.mod h1:YA4FsgPShsVoP84DkBJEkCKDgsH4PpgTa0NzNBf6y4I= +k8s.io/component-base v0.33.1 h1:EoJ0xA+wr77T+G8p6T3l4efT2oNwbqBVKR71E0tBIaI= +k8s.io/component-base v0.33.1/go.mod h1:guT/w/6piyPfTgq7gfvgetyXMIh10zuXA6cRRm3rDuY= k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= k8s.io/klog v1.0.0/go.mod 
h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E= -k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f h1:GA7//TjRY9yWGy1poLzYYJJ4JRdzg3+O6e8I+e+8T5Y= -k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f/go.mod h1:R/HEjbvWI0qdfb8viZUeVZm0X6IZnxAydC7YU42CMw4= +k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff h1:/usPimJzUKKu+m+TE36gUyGcf03XZEP0ZIKgKj35LS4= +k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff/go.mod h1:5jIi+8yX4RIb8wk3XwBo5Pq2ccx4FP10ohkbSKCZoK8= k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 h1:M3sRQVHv7vB20Xc2ybTt7ODCeFj6JSWYFzOFnYeS6Ro= k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.0 h1:CPT0ExVicCzcpeN4baWEV2ko2Z/AsiZgEdwgcfwLgMo= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.0/go.mod h1:Ve9uj1L+deCXFrPOk1LpFXqTg7LCFzFso6PA48q/XZw= -sigs.k8s.io/cluster-api v1.10.1 h1:5vsLNgQ4SkPudJ1USK532B0SIdJxRsCNKt2DZtBf+ww= -sigs.k8s.io/cluster-api v1.10.1/go.mod h1:aiPMrNPoaJc/GuJ4TCpWX8bVe11+iCJ4HI0f3c9QiJg= -sigs.k8s.io/controller-runtime v0.20.4 h1:X3c+Odnxz+iPTRobG4tp092+CvBU9UK0t/bRf+n0DGU= -sigs.k8s.io/controller-runtime v0.20.4/go.mod h1:xg2XB0K5ShQzAgsoujxuKN4LNXR2LfwwHsPj7Iaw+XY= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.2 h1:jpcvIRr3GLoUoEKRkHKSmGjxb6lWwrBlJsXc+eUYQHM= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.2/go.mod h1:Ve9uj1L+deCXFrPOk1LpFXqTg7LCFzFso6PA48q/XZw= +sigs.k8s.io/cluster-api v1.10.0-beta.0.0.20250527052821-4642e1497262 h1:GcAHcaAycWzdtTRwW/rcQwcABVoCxyh+lJ6l2paqMoU= +sigs.k8s.io/cluster-api v1.10.0-beta.0.0.20250527052821-4642e1497262/go.mod h1:UmipfrOBTqjRNX7X4zuJCInq28/Fh6xq9RklOJ/DMR4= +sigs.k8s.io/controller-runtime v0.21.0 h1:CYfjpEuicjUecRk+KAeyYh+ouUBn4llGyDYytIGcJS8= +sigs.k8s.io/controller-runtime v0.21.0/go.mod h1:OSg14+F65eWqIu4DceX7k/+QRAbTTvxeQSNSOQpukWM= sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1Ray9eB4DBr+K+/sCJ8= sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3/go.mod h1:18nIHnGi6636UCz6m8i4DhaJ65T6EruyzmoQqI2BVDo= +sigs.k8s.io/randfill v0.0.0-20250304075658-069ef1bbf016/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= +sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU= +sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= sigs.k8s.io/structured-merge-diff v0.0.0-20190525122527-15d366b2352e/go.mod h1:wWxsB5ozmmv/SG7nM11ayaAW51xMvak/t1r0CSlcokI= -sigs.k8s.io/structured-merge-diff/v4 v4.4.2 h1:MdmvkGuXi/8io6ixD5wud3vOLwc1rj0aNqRlpuvjmwA= -sigs.k8s.io/structured-merge-diff/v4 v4.4.2/go.mod h1:N8f93tFZh9U6vpxwRArLiikrE5/2tiu1w1AGfACIGE4= +sigs.k8s.io/structured-merge-diff/v4 v4.6.0 h1:IUA9nvMmnKWcj5jl84xn+T5MnlZKThmUW1TdblaLVAc= +sigs.k8s.io/structured-merge-diff/v4 v4.6.0/go.mod h1:dDy58f92j70zLsuZVuUX5Wp9vtxXpaZnkPGWeqDfCps= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= diff --git a/hack/ensure-go.sh b/hack/ensure-go.sh index 63a5f87f6b..5cb79b7310 100755 --- a/hack/ensure-go.sh +++ b/hack/ensure-go.sh @@ -38,7 +38,7 @@ 
EOF local go_version IFS=" " read -ra go_version <<< "$(go version)" local minimum_go_version - minimum_go_version=go1.23 + minimum_go_version=go1.24 if [[ "${minimum_go_version}" != $(echo -e "${minimum_go_version}\n${go_version[2]}" | sort -s -t. -k 1,1 -k 2,2n -k 3,3n | head -n1) && "${go_version[2]}" != "devel" ]]; then cat < sigs.k8s.io/cluster-api v1.10.1 +replace sigs.k8s.io/cluster-api => sigs.k8s.io/cluster-api v1.10.0-beta.0.0.20250527052821-4642e1497262 replace sigs.k8s.io/cluster-api-provider-vsphere => ../ @@ -10,13 +10,13 @@ require ( github.com/onsi/gomega v1.37.0 github.com/pkg/errors v0.9.1 github.com/spf13/cobra v1.9.1 - k8s.io/api v0.32.4 - k8s.io/apiextensions-apiserver v0.32.4 - k8s.io/apimachinery v0.32.4 + k8s.io/api v0.33.1 + k8s.io/apiextensions-apiserver v0.33.1 + k8s.io/apimachinery v0.33.1 k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 sigs.k8s.io/cluster-api v0.0.0-00010101000000-000000000000 sigs.k8s.io/cluster-api-provider-vsphere v0.0.0-00010101000000-000000000000 - sigs.k8s.io/controller-runtime v0.20.4 + sigs.k8s.io/controller-runtime v0.21.0 sigs.k8s.io/kustomize/api v0.18.0 sigs.k8s.io/kustomize/kyaml v0.18.1 sigs.k8s.io/yaml v1.4.0 @@ -39,11 +39,9 @@ require ( github.com/go-openapi/swag v0.23.0 // indirect github.com/gobuffalo/flect v1.0.3 // indirect github.com/gogo/protobuf v1.3.2 // indirect - github.com/golang/protobuf v1.5.4 // indirect github.com/google/btree v1.1.3 // indirect - github.com/google/gnostic-models v0.6.8 // indirect + github.com/google/gnostic-models v0.6.9 // indirect github.com/google/go-cmp v0.7.0 // indirect - github.com/google/gofuzz v1.2.0 // indirect github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect github.com/google/uuid v1.6.0 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect @@ -54,32 +52,33 @@ require ( github.com/modern-go/reflect2 v1.0.2 // indirect github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect - github.com/prometheus/client_golang v1.19.1 // indirect + github.com/prometheus/client_golang v1.22.0 // indirect github.com/prometheus/client_model v0.6.1 // indirect - github.com/prometheus/common v0.55.0 // indirect + github.com/prometheus/common v0.62.0 // indirect github.com/prometheus/procfs v0.15.1 // indirect github.com/spf13/pflag v1.0.6 // indirect github.com/x448/float16 v0.8.4 // indirect github.com/xlab/treeprint v1.2.0 // indirect - go.opentelemetry.io/otel v1.29.0 // indirect - go.opentelemetry.io/otel/trace v1.29.0 // indirect + go.opentelemetry.io/otel v1.33.0 // indirect + go.opentelemetry.io/otel/trace v1.33.0 // indirect golang.org/x/net v0.40.0 // indirect - golang.org/x/oauth2 v0.28.0 // indirect + golang.org/x/oauth2 v0.30.0 // indirect golang.org/x/sync v0.14.0 // indirect golang.org/x/sys v0.33.0 // indirect golang.org/x/term v0.32.0 // indirect golang.org/x/text v0.25.0 // indirect - golang.org/x/time v0.8.0 // indirect + golang.org/x/time v0.9.0 // indirect gomodules.xyz/jsonpatch/v2 v2.5.0 // indirect google.golang.org/protobuf v1.36.5 // indirect gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/client-go v0.32.4 // indirect - k8s.io/cluster-bootstrap v0.32.4 // indirect - k8s.io/component-base v0.32.4 // indirect + k8s.io/client-go v0.33.1 // indirect + k8s.io/cluster-bootstrap v0.33.1 // indirect + k8s.io/component-base v0.33.1 // indirect k8s.io/klog/v2 v2.130.1 
// indirect - k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f // indirect + k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff // indirect sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect - sigs.k8s.io/structured-merge-diff/v4 v4.4.2 // indirect + sigs.k8s.io/randfill v1.0.0 // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.6.0 // indirect ) diff --git a/packaging/go.sum b/packaging/go.sum index 05a098a964..b83046517d 100644 --- a/packaging/go.sum +++ b/packaging/go.sum @@ -44,12 +44,10 @@ github.com/gobuffalo/flect v1.0.3 h1:xeWBM2nui+qnVvNM4S3foBhCAL2XgPU+a7FdpelbTq4 github.com/gobuffalo/flect v1.0.3/go.mod h1:A5msMlrHtLqh9umBSnvabjsMrCcCpAyzglnDvkbYKHs= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= -github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg= github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= -github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= -github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= +github.com/google/gnostic-models v0.6.9 h1:MU/8wDLif2qCXZmzncUQ/BOfxWfthHi63KqpoNbWqVw= +github.com/google/gnostic-models v0.6.9/go.mod h1:CiWsm0s6BSQd1hRn8/QmxqB6BesYcbSZxsz9b0KuDBw= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= @@ -70,6 +68,8 @@ github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnr github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= +github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= @@ -77,6 +77,8 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -97,16 +99,16 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pmezard/go-difflib 
v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE= -github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho= +github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q= +github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0= github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= -github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc= -github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8= +github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io= +github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I= github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= -github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= -github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= +github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= +github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ= github.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= @@ -142,10 +144,10 @@ github.com/xlab/treeprint v1.2.0 h1:HzHnuAF1plUN2zGlAFHbSQP2qJ0ZAD3XF5XD7OesXRQ= github.com/xlab/treeprint v1.2.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -go.opentelemetry.io/otel v1.29.0 h1:PdomN/Al4q/lN6iBJEN3AwPvUiHPMlt93c8bqTG5Llw= -go.opentelemetry.io/otel v1.29.0/go.mod h1:N/WtXPs1CNCUEx+Agz5uouwCba+i+bJGFicT8SR4NP8= -go.opentelemetry.io/otel/trace v1.29.0 h1:J/8ZNK4XgR7a21DZUAsbF8pZ5Jcw1VhACmnYt39JTi4= -go.opentelemetry.io/otel/trace v1.29.0/go.mod h1:eHl3w0sp3paPkYstJOmAimxhiFXPg+MMTlEh3nsQgWQ= +go.opentelemetry.io/otel v1.33.0 h1:/FerN9bax5LoK51X/sI0SVYrjSE0/yUL7DpxW4K3FWw= +go.opentelemetry.io/otel v1.33.0/go.mod h1:SUUkR6csvUQl+yjReHu5uM3EtVV7MBm5FHKRlNx4I8I= +go.opentelemetry.io/otel/trace v1.33.0 h1:cCJuF7LRjUFso9LPnEAHJDB2pqzp+hbO8eu1qqW2d/s= +go.opentelemetry.io/otel/trace v1.33.0/go.mod h1:uIcdVUZMpTAmz0tI1z04GoVSezK37CbGV4fr1f2nBck= go.uber.org/automaxprocs v1.6.0 h1:O3y2/QNTOdbF+e/dpXNNW7Rx2hZ4sTIPyybbxyNqTUs= go.uber.org/automaxprocs v1.6.0/go.mod h1:ifeIMSnPZuznNm6jmdzmU3/bfk01Fe2fotchwEFJ8r8= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= @@ -165,8 +167,8 @@ golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod 
h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.40.0 h1:79Xs7wF06Gbdcg4kdCCIQArK11Z1hr5POQ6+fIYHNuY= golang.org/x/net v0.40.0/go.mod h1:y0hY0exeL2Pku80/zKK7tpntoX23cqL3Oa6njdgRtds= -golang.org/x/oauth2 v0.28.0 h1:CrgCKl8PPAVtLnU3c+EDw6x11699EWlsDeWNWKdIOkc= -golang.org/x/oauth2 v0.28.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8= +golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI= +golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -183,8 +185,8 @@ golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.25.0 h1:qVyWApTSYLk/drJRO5mDlNYskwQznZmkpV2c8q9zls4= golang.org/x/text v0.25.0/go.mod h1:WEdwpYrmk1qmdHvhkSTNPm3app7v4rsT8F2UD6+VHIA= -golang.org/x/time v0.8.0 h1:9i3RxcPv3PZnitoVGMPDKZSq1xW1gK1Xy3ArNOGZfEg= -golang.org/x/time v0.8.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/time v0.9.0 h1:EsRrnYcQiGH+5FfbgvV4AP7qEZstoyrHB0DzarOQ4ZY= +golang.org/x/time v0.9.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= @@ -215,35 +217,38 @@ gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRN gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -k8s.io/api v0.32.4 h1:kw8Y/G8E7EpNy7gjB8gJZl3KJkNz8HM2YHrZPtAZsF4= -k8s.io/api v0.32.4/go.mod h1:5MYFvLvweRhyKylM3Es/6uh/5hGp0dg82vP34KifX4g= -k8s.io/apiextensions-apiserver v0.32.4 h1:IA+CoR63UDOijR/vEpow6wQnX4V6iVpzazJBskHrpHE= -k8s.io/apiextensions-apiserver v0.32.4/go.mod h1:Y06XO/b92H8ymOdG1HlA1submf7gIhbEDc3RjriqZOs= -k8s.io/apimachinery v0.32.4 h1:8EEksaxA7nd7xWJkkwLDN4SvWS5ot9g6Z/VZb3ju25I= -k8s.io/apimachinery v0.32.4/go.mod h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE= -k8s.io/client-go v0.32.4 h1:zaGJS7xoYOYumoWIFXlcVrsiYioRPrXGO7dBfVC5R6M= -k8s.io/client-go v0.32.4/go.mod h1:k0jftcyYnEtwlFW92xC7MTtFv5BNcZBr+zn9jPlT9Ic= -k8s.io/cluster-bootstrap v0.32.4 h1:QO2rZ1KDLHaa3WKgpF3P26/5AKLFViMt0jUJptylCgs= -k8s.io/cluster-bootstrap v0.32.4/go.mod h1:+O5BK2t/VxGXcPPOn+SlpFrC0x78nnW6jnPI2MRhdz8= -k8s.io/component-base v0.32.4 h1:HuF+2JVLbFS5GODLIfPCb1Td6b+G2HszJoArcWOSr5I= -k8s.io/component-base v0.32.4/go.mod h1:10KloJEYw1keU/Xmjfy9TKJqUq7J2mYdiD1VDXoco4o= +k8s.io/api v0.33.1 h1:tA6Cf3bHnLIrUK4IqEgb2v++/GYUtqiu9sRVk3iBXyw= +k8s.io/api v0.33.1/go.mod h1:87esjTn9DRSRTD4fWMXamiXxJhpOIREjWOSjsW1kEHw= +k8s.io/apiextensions-apiserver v0.33.1 h1:N7ccbSlRN6I2QBcXevB73PixX2dQNIW0ZRuguEE91zI= +k8s.io/apiextensions-apiserver v0.33.1/go.mod h1:uNQ52z1A1Gu75QSa+pFK5bcXc4hq7lpOXbweZgi4dqA= +k8s.io/apimachinery v0.33.1 
h1:mzqXWV8tW9Rw4VeW9rEkqvnxj59k1ezDUl20tFK/oM4= +k8s.io/apimachinery v0.33.1/go.mod h1:BHW0YOu7n22fFv/JkYOEfkUYNRN0fj0BlvMFWA7b+SM= +k8s.io/client-go v0.33.1 h1:ZZV/Ks2g92cyxWkRRnfUDsnhNn28eFpt26aGc8KbXF4= +k8s.io/client-go v0.33.1/go.mod h1:JAsUrl1ArO7uRVFWfcj6kOomSlCv+JpvIsp6usAGefA= +k8s.io/cluster-bootstrap v0.33.1 h1:esGY+qXFJ78myppBzMVqqj37ReGLOJpQNslRiqmQGes= +k8s.io/cluster-bootstrap v0.33.1/go.mod h1:YA4FsgPShsVoP84DkBJEkCKDgsH4PpgTa0NzNBf6y4I= +k8s.io/component-base v0.33.1 h1:EoJ0xA+wr77T+G8p6T3l4efT2oNwbqBVKR71E0tBIaI= +k8s.io/component-base v0.33.1/go.mod h1:guT/w/6piyPfTgq7gfvgetyXMIh10zuXA6cRRm3rDuY= k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= -k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f h1:GA7//TjRY9yWGy1poLzYYJJ4JRdzg3+O6e8I+e+8T5Y= -k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f/go.mod h1:R/HEjbvWI0qdfb8viZUeVZm0X6IZnxAydC7YU42CMw4= +k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff h1:/usPimJzUKKu+m+TE36gUyGcf03XZEP0ZIKgKj35LS4= +k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff/go.mod h1:5jIi+8yX4RIb8wk3XwBo5Pq2ccx4FP10ohkbSKCZoK8= k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 h1:M3sRQVHv7vB20Xc2ybTt7ODCeFj6JSWYFzOFnYeS6Ro= k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= -sigs.k8s.io/cluster-api v1.10.1 h1:5vsLNgQ4SkPudJ1USK532B0SIdJxRsCNKt2DZtBf+ww= -sigs.k8s.io/cluster-api v1.10.1/go.mod h1:aiPMrNPoaJc/GuJ4TCpWX8bVe11+iCJ4HI0f3c9QiJg= -sigs.k8s.io/controller-runtime v0.20.4 h1:X3c+Odnxz+iPTRobG4tp092+CvBU9UK0t/bRf+n0DGU= -sigs.k8s.io/controller-runtime v0.20.4/go.mod h1:xg2XB0K5ShQzAgsoujxuKN4LNXR2LfwwHsPj7Iaw+XY= +sigs.k8s.io/cluster-api v1.10.0-beta.0.0.20250527052821-4642e1497262 h1:GcAHcaAycWzdtTRwW/rcQwcABVoCxyh+lJ6l2paqMoU= +sigs.k8s.io/cluster-api v1.10.0-beta.0.0.20250527052821-4642e1497262/go.mod h1:UmipfrOBTqjRNX7X4zuJCInq28/Fh6xq9RklOJ/DMR4= +sigs.k8s.io/controller-runtime v0.21.0 h1:CYfjpEuicjUecRk+KAeyYh+ouUBn4llGyDYytIGcJS8= +sigs.k8s.io/controller-runtime v0.21.0/go.mod h1:OSg14+F65eWqIu4DceX7k/+QRAbTTvxeQSNSOQpukWM= sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1Ray9eB4DBr+K+/sCJ8= sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3/go.mod h1:18nIHnGi6636UCz6m8i4DhaJ65T6EruyzmoQqI2BVDo= sigs.k8s.io/kustomize/api v0.18.0 h1:hTzp67k+3NEVInwz5BHyzc9rGxIauoXferXyjv5lWPo= sigs.k8s.io/kustomize/api v0.18.0/go.mod h1:f8isXnX+8b+SGLHQ6yO4JG1rdkZlvhaCf/uZbLVMb0U= sigs.k8s.io/kustomize/kyaml v0.18.1 h1:WvBo56Wzw3fjS+7vBjN6TeivvpbW9GmRaWZ9CIVmt4E= sigs.k8s.io/kustomize/kyaml v0.18.1/go.mod h1:C3L2BFVU1jgcddNBE1TxuVLgS46TjObMwW5FT9FcjYo= -sigs.k8s.io/structured-merge-diff/v4 v4.4.2 h1:MdmvkGuXi/8io6ixD5wud3vOLwc1rj0aNqRlpuvjmwA= -sigs.k8s.io/structured-merge-diff/v4 v4.4.2/go.mod h1:N8f93tFZh9U6vpxwRArLiikrE5/2tiu1w1AGfACIGE4= +sigs.k8s.io/randfill v0.0.0-20250304075658-069ef1bbf016/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= +sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU= +sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= +sigs.k8s.io/structured-merge-diff/v4 v4.6.0 h1:IUA9nvMmnKWcj5jl84xn+T5MnlZKThmUW1TdblaLVAc= +sigs.k8s.io/structured-merge-diff/v4 v4.6.0/go.mod h1:dDy58f92j70zLsuZVuUX5Wp9vtxXpaZnkPGWeqDfCps= sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= diff --git 
a/pkg/clustermodule/service_test.go b/pkg/clustermodule/service_test.go
index 5d0dbc0b7d..002a0d3188 100644
--- a/pkg/clustermodule/service_test.go
+++ b/pkg/clustermodule/service_test.go
@@ -24,7 +24,7 @@ import (
 	"github.com/onsi/gomega"
 	corev1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
+	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2"
 	infrav1 "sigs.k8s.io/cluster-api-provider-vsphere/apis/v1beta1"
 	"sigs.k8s.io/cluster-api-provider-vsphere/internal/test/helpers/vcsim"
diff --git a/pkg/clustermodule/types.go b/pkg/clustermodule/types.go
index b9095a0880..53bbbe0c4c 100644
--- a/pkg/clustermodule/types.go
+++ b/pkg/clustermodule/types.go
@@ -17,8 +17,8 @@ limitations under the License.
 package clustermodule
 import (
-	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
-	controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1"
+	controlplanev1 "sigs.k8s.io/cluster-api/api/controlplane/kubeadm/v1beta2"
+	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 )
diff --git a/pkg/context/cluster_context.go b/pkg/context/cluster_context.go
index 79d24cb78d..4cec74bb29 100644
--- a/pkg/context/cluster_context.go
+++ b/pkg/context/cluster_context.go
@@ -19,8 +19,8 @@ package context
 import (
 	"fmt"
-	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
-	"sigs.k8s.io/cluster-api/util/patch"
+	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2"
+	"sigs.k8s.io/cluster-api/util/deprecated/v1beta1/patch"
 	infrav1 "sigs.k8s.io/cluster-api-provider-vsphere/apis/v1beta1"
 )
diff --git a/pkg/context/fake/fake_cluster_context.go b/pkg/context/fake/fake_cluster_context.go
index 8dce59da0f..ceb49d46ed 100644
--- a/pkg/context/fake/fake_cluster_context.go
+++ b/pkg/context/fake/fake_cluster_context.go
@@ -21,7 +21,7 @@ import (
 	corev1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
+	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2"
 	infrav1 "sigs.k8s.io/cluster-api-provider-vsphere/apis/v1beta1"
 	capvcontext "sigs.k8s.io/cluster-api-provider-vsphere/pkg/context"
diff --git a/pkg/context/fake/fake_controller_manager_context.go b/pkg/context/fake/fake_controller_manager_context.go
index 68e83492fa..644e2012a4 100644
--- a/pkg/context/fake/fake_controller_manager_context.go
+++ b/pkg/context/fake/fake_controller_manager_context.go
@@ -20,9 +20,9 @@ import (
 	vmoprv1 "github.com/vmware-tanzu/vm-operator/api/v1alpha2"
 	"k8s.io/apimachinery/pkg/runtime"
 	clientgoscheme "k8s.io/client-go/kubernetes/scheme"
-	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
-	controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1"
-	ipamv1 "sigs.k8s.io/cluster-api/exp/ipam/api/v1beta1"
+	controlplanev1 "sigs.k8s.io/cluster-api/api/controlplane/kubeadm/v1beta2"
+	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2"
+	ipamv1beta1 "sigs.k8s.io/cluster-api/api/ipam/v1beta1"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 	"sigs.k8s.io/controller-runtime/pkg/client/fake"
 	ctrllog "sigs.k8s.io/controller-runtime/pkg/log"
@@ -44,7 +44,7 @@ func NewControllerManagerContext(initObjects ...client.Object) *capvcontext.Cont
 	_ = infrav1.AddToScheme(scheme)
 	_ = vmwarev1.AddToScheme(scheme)
 	_ = vmoprv1.AddToScheme(scheme)
-	_ = ipamv1.AddToScheme(scheme)
+	_ = ipamv1beta1.AddToScheme(scheme)
 	clientWithObjects := fake.NewClientBuilder().WithScheme(scheme).WithStatusSubresource(
 		&infrav1.VSphereVM{},
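The hunk above only swaps the IPAM group to the explicitly versioned ipamv1beta1 alias; the test scaffolding keeps the same shape. A minimal sketch of that pattern, assuming the controller-runtime fake client builder already used in this file (the helper name and object list are illustrative, not part of this change):

import (
	"k8s.io/apimachinery/pkg/runtime"
	clientgoscheme "k8s.io/client-go/kubernetes/scheme"
	ipamv1beta1 "sigs.k8s.io/cluster-api/api/ipam/v1beta1"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/client/fake"

	infrav1 "sigs.k8s.io/cluster-api-provider-vsphere/apis/v1beta1"
)

// newFakeClient mirrors the scheme wiring above: the core Kubernetes types,
// the CAPV types and the CAPI IPAM v1beta1 types are registered into one
// scheme, and the VSphereVM status subresource is enabled so status patches
// work against the fake client.
func newFakeClient(initObjects ...client.Object) client.Client {
	scheme := runtime.NewScheme()
	_ = clientgoscheme.AddToScheme(scheme)
	_ = infrav1.AddToScheme(scheme)
	_ = ipamv1beta1.AddToScheme(scheme)

	return fake.NewClientBuilder().
		WithScheme(scheme).
		WithObjects(initObjects...).
		WithStatusSubresource(&infrav1.VSphereVM{}).
		Build()
}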
diff --git a/pkg/context/fake/fake_machine_context.go b/pkg/context/fake/fake_machine_context.go
index 1b55f0d756..11432ae393 100644
--- a/pkg/context/fake/fake_machine_context.go
+++ b/pkg/context/fake/fake_machine_context.go
@@ -20,7 +20,7 @@ import (
 	"context"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
+	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2"
 	infrav1 "sigs.k8s.io/cluster-api-provider-vsphere/apis/v1beta1"
 	capvcontext "sigs.k8s.io/cluster-api-provider-vsphere/pkg/context"
@@ -30,7 +30,7 @@ import (
 // reconcilers with a fake client.
 func NewMachineContext(ctx context.Context, clusterCtx *capvcontext.ClusterContext, controllerManagerCtx *capvcontext.ControllerManagerContext) *capvcontext.VIMMachineContext {
 	// Create the machine resources.
-	machine := newMachineV1a4()
+	machine := newMachineV1()
 	vsphereMachine := newVSphereMachine(machine)
 	// Add the cluster resources to the fake cluster client.
@@ -52,7 +52,7 @@ func NewMachineContext(ctx context.Context, clusterCtx *capvcontext.ClusterConte
 	}
 }
-func newMachineV1a4() clusterv1.Machine {
+func newMachineV1() clusterv1.Machine {
 	dataSecretName := "fake-name"
 	return clusterv1.Machine{
 		ObjectMeta: metav1.ObjectMeta{
diff --git a/pkg/context/fake/fake_util.go b/pkg/context/fake/fake_util.go
index c73ac10128..8fa4b94817 100644
--- a/pkg/context/fake/fake_util.go
+++ b/pkg/context/fake/fake_util.go
@@ -18,7 +18,7 @@ package fake
 import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
+	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2"
 	vmwarev1 "sigs.k8s.io/cluster-api-provider-vsphere/apis/vmware/v1beta1"
 )
diff --git a/pkg/context/fake/fake_vm_context.go b/pkg/context/fake/fake_vm_context.go
index 24365ac96e..ef8e4a022c 100644
--- a/pkg/context/fake/fake_vm_context.go
+++ b/pkg/context/fake/fake_vm_context.go
@@ -20,7 +20,7 @@ import (
 	"context"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"sigs.k8s.io/cluster-api/util/patch"
+	"sigs.k8s.io/cluster-api/util/deprecated/v1beta1/patch"
 	infrav1 "sigs.k8s.io/cluster-api-provider-vsphere/apis/v1beta1"
 	capvcontext "sigs.k8s.io/cluster-api-provider-vsphere/pkg/context"
diff --git a/pkg/context/interfaces.go b/pkg/context/interfaces.go
index 0fac9252ae..9b11aa236b 100644
--- a/pkg/context/interfaces.go
+++ b/pkg/context/interfaces.go
@@ -20,9 +20,9 @@ import (
 	"context"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
-	"sigs.k8s.io/cluster-api/util/conditions"
-	v1beta2conditions "sigs.k8s.io/cluster-api/util/conditions/v1beta2"
+	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2"
+	deprecatedconditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions"
+	deprecatedv1beta2conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions/v1beta2"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 )
@@ -41,6 +41,6 @@ type MachineContext interface {
 // VSphereMachine is a common interface used for VSphereMachines across VMOperator and non-VMOperator modes.
 type VSphereMachine interface {
 	client.Object
-	conditions.Setter
-	v1beta2conditions.Setter
+	deprecatedconditions.Setter
+	deprecatedv1beta2conditions.Setter
 }
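Because VSphereMachine keeps satisfying both setter interfaces, callers continue to work with conditions exactly as before; only the import paths move. A rough sketch under that assumption (it presumes the relocated package keeps the Get/Set/TrueCondition helpers of the original util/conditions package, which the tests in this PR already use; the Ready condition and helper name are just examples):

import (
	corev1 "k8s.io/api/core/v1"
	clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
	deprecatedconditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions"
)

// markReady writes a v1beta1-style Ready condition through the relocated
// helpers and reads it back; any type implementing the Setter interface
// (such as VSphereMachine above) can be passed in.
func markReady(obj deprecatedconditions.Setter) bool {
	deprecatedconditions.Set(obj, deprecatedconditions.TrueCondition(clusterv1beta1.ReadyCondition))

	c := deprecatedconditions.Get(obj, clusterv1beta1.ReadyCondition)
	return c != nil && c.Status == corev1.ConditionTrue
}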
type VSphereMachine interface { client.Object - conditions.Setter - v1beta2conditions.Setter + deprecatedconditions.Setter + deprecatedv1beta2conditions.Setter } diff --git a/pkg/context/machine_context.go b/pkg/context/machine_context.go index 300f7c0f02..ae10e7e5bb 100644 --- a/pkg/context/machine_context.go +++ b/pkg/context/machine_context.go @@ -21,8 +21,9 @@ import ( "fmt" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - "sigs.k8s.io/cluster-api/util/patch" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" + "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/patch" infrav1 "sigs.k8s.io/cluster-api-provider-vsphere/apis/v1beta1" ) @@ -62,7 +63,7 @@ func (c *VIMMachineContext) Patch(ctx context.Context) error { return c.PatchHelper.Patch(ctx, c.VSphereMachine, patch.WithOwnedV1Beta2Conditions{Conditions: []string{ infrav1.VSphereMachineReadyV1Beta2Condition, infrav1.VSphereMachineVirtualMachineProvisionedV1Beta2Condition, - clusterv1.PausedV1Beta2Condition, + clusterv1beta1.PausedV1Beta2Condition, }}) } diff --git a/pkg/context/vm_context.go b/pkg/context/vm_context.go index f789e35fb4..550c4fdea2 100644 --- a/pkg/context/vm_context.go +++ b/pkg/context/vm_context.go @@ -21,8 +21,8 @@ import ( "context" "fmt" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - "sigs.k8s.io/cluster-api/util/patch" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/patch" infrav1 "sigs.k8s.io/cluster-api-provider-vsphere/apis/v1beta1" "sigs.k8s.io/cluster-api-provider-vsphere/pkg/session" @@ -52,7 +52,7 @@ func (c *VMContext) Patch(ctx context.Context) error { infrav1.VSphereVMIPAddressClaimsFulfilledV1Beta2Condition, infrav1.VSphereVMGuestSoftPowerOffSucceededV1Beta2Condition, infrav1.VSphereVMPCIDevicesDetachedV1Beta2Condition, - clusterv1.PausedV1Beta2Condition, + clusterv1beta1.PausedV1Beta2Condition, }}) } diff --git a/pkg/context/vmware/cluster_context.go b/pkg/context/vmware/cluster_context.go index cced6c0a81..85750be670 100644 --- a/pkg/context/vmware/cluster_context.go +++ b/pkg/context/vmware/cluster_context.go @@ -20,8 +20,8 @@ package vmware import ( "fmt" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - "sigs.k8s.io/cluster-api/util/patch" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" + "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/patch" vmwarev1 "sigs.k8s.io/cluster-api-provider-vsphere/apis/vmware/v1beta1" ) diff --git a/pkg/context/vmware/machine_context.go b/pkg/context/vmware/machine_context.go index de5fc53e2c..127263631f 100644 --- a/pkg/context/vmware/machine_context.go +++ b/pkg/context/vmware/machine_context.go @@ -22,8 +22,8 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - "sigs.k8s.io/cluster-api/util/patch" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/patch" infrav1 "sigs.k8s.io/cluster-api-provider-vsphere/apis/v1beta1" vmwarev1 "sigs.k8s.io/cluster-api-provider-vsphere/apis/vmware/v1beta1" @@ -52,7 +52,7 @@ func (c *SupervisorMachineContext) Patch(ctx context.Context) error { return c.PatchHelper.Patch(ctx, c.VSphereMachine, patch.WithOwnedV1Beta2Conditions{Conditions: []string{ infrav1.VSphereMachineReadyV1Beta2Condition, infrav1.VSphereMachineVirtualMachineProvisionedV1Beta2Condition, - clusterv1.PausedV1Beta2Condition, + 
clusterv1beta1.PausedV1Beta2Condition, }}) } diff --git a/pkg/context/vspheredeploymentzone_context.go b/pkg/context/vspheredeploymentzone_context.go index 3e84cc4760..151ec720df 100644 --- a/pkg/context/vspheredeploymentzone_context.go +++ b/pkg/context/vspheredeploymentzone_context.go @@ -19,7 +19,7 @@ package context import ( "fmt" - "sigs.k8s.io/cluster-api/util/patch" + "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/patch" infrav1 "sigs.k8s.io/cluster-api-provider-vsphere/apis/v1beta1" "sigs.k8s.io/cluster-api-provider-vsphere/pkg/session" diff --git a/pkg/manager/manager.go b/pkg/manager/manager.go index 09b3c57cd3..5adee76250 100644 --- a/pkg/manager/manager.go +++ b/pkg/manager/manager.go @@ -28,10 +28,10 @@ import ( "gopkg.in/fsnotify.v1" apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" clientgoscheme "k8s.io/client-go/kubernetes/scheme" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1" - controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1" - ipamv1 "sigs.k8s.io/cluster-api/exp/ipam/api/v1beta1" + bootstrapv1 "sigs.k8s.io/cluster-api/api/bootstrap/kubeadm/v1beta2" + controlplanev1 "sigs.k8s.io/cluster-api/api/controlplane/kubeadm/v1beta2" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" + ipamv1beta1 "sigs.k8s.io/cluster-api/api/ipam/v1beta1" ctrl "sigs.k8s.io/controller-runtime" infrav1alpha3 "sigs.k8s.io/cluster-api-provider-vsphere/apis/v1alpha3" @@ -69,7 +69,7 @@ func New(ctx context.Context, opts Options) (Manager, error) { _ = netopv1.AddToScheme(opts.Scheme) _ = nsxvpcv1.AddToScheme(opts.Scheme) _ = topologyv1.AddToScheme(opts.Scheme) - _ = ipamv1.AddToScheme(opts.Scheme) + _ = ipamv1beta1.AddToScheme(opts.Scheme) // Build the controller manager. mgr, err := ctrl.NewManager(opts.KubeConfig, opts.Options) diff --git a/pkg/services/govmomi/create.go b/pkg/services/govmomi/create.go index febc1fdef0..f65f5575a2 100644 --- a/pkg/services/govmomi/create.go +++ b/pkg/services/govmomi/create.go @@ -20,7 +20,7 @@ import ( "context" "github.com/pkg/errors" - bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1" + bootstrapv1 "sigs.k8s.io/cluster-api/api/bootstrap/kubeadm/v1beta2" capvcontext "sigs.k8s.io/cluster-api-provider-vsphere/pkg/context" "sigs.k8s.io/cluster-api-provider-vsphere/pkg/services/govmomi/vcenter" diff --git a/pkg/services/govmomi/ipam/parse.go b/pkg/services/govmomi/ipam/parse.go index c71715657b..2b68b72182 100644 --- a/pkg/services/govmomi/ipam/parse.go +++ b/pkg/services/govmomi/ipam/parse.go @@ -20,7 +20,7 @@ import ( "fmt" "net/netip" - ipamv1 "sigs.k8s.io/cluster-api/exp/ipam/api/v1beta1" + ipamv1beta1 "sigs.k8s.io/cluster-api/api/ipam/v1beta1" ) // prefixesAsStrings converts []netip.Prefix to []string. @@ -33,7 +33,7 @@ func prefixesAsStrings(prefixes []netip.Prefix) []string { } // parseAddressWithPrefix converts a *ipamv1.IPAddress to a string, e.g. '10.0.0.1/24'. -func parseAddressWithPrefix(ipamAddress *ipamv1.IPAddress) (netip.Prefix, error) { +func parseAddressWithPrefix(ipamAddress *ipamv1beta1.IPAddress) (netip.Prefix, error) { addressWithPrefix := fmt.Sprintf("%s/%d", ipamAddress.Spec.Address, ipamAddress.Spec.Prefix) parsedPrefix, err := netip.ParsePrefix(addressWithPrefix) if err != nil { @@ -53,7 +53,7 @@ func parseAddressWithPrefix(ipamAddress *ipamv1.IPAddress) (netip.Prefix, error) // family as the address on the ipamv1.IPAddress. 
Gateway addresses of one // family must match the other addresses of the same family. A gateway address // is optional. If it is not set this function returns `nil, nil`. -func parseGateway(ipamAddress *ipamv1.IPAddress, addressWithPrefix netip.Prefix, ipamDeviceConfig ipamDeviceConfig) (*netip.Addr, error) { +func parseGateway(ipamAddress *ipamv1beta1.IPAddress, addressWithPrefix netip.Prefix, ipamDeviceConfig ipamDeviceConfig) (*netip.Addr, error) { if ipamAddress.Spec.Gateway == "" { return nil, nil } diff --git a/pkg/services/govmomi/ipam/status.go b/pkg/services/govmomi/ipam/status.go index 6d9403bb63..ccc71f0303 100644 --- a/pkg/services/govmomi/ipam/status.go +++ b/pkg/services/govmomi/ipam/status.go @@ -28,7 +28,7 @@ import ( apierrors "k8s.io/apimachinery/pkg/api/errors" apitypes "k8s.io/apimachinery/pkg/types" "k8s.io/klog/v2" - ipamv1 "sigs.k8s.io/cluster-api/exp/ipam/api/v1beta1" + ipamv1beta1 "sigs.k8s.io/cluster-api/api/ipam/v1beta1" ctrl "sigs.k8s.io/controller-runtime" infrav1 "sigs.k8s.io/cluster-api-provider-vsphere/apis/v1beta1" @@ -40,7 +40,7 @@ import ( // of parsing IPAM addresses for a given device. type ipamDeviceConfig struct { DeviceIndex int - IPAMAddresses []*ipamv1.IPAddress + IPAMAddresses []*ipamv1beta1.IPAddress MACAddress string NetworkSpecGateway4 string IPAMConfigGateway4 string @@ -131,7 +131,7 @@ func buildIPAMDeviceConfigs(ctx context.Context, vmCtx capvcontext.VMContext, ne } ipamDeviceConfig := ipamDeviceConfig{ - IPAMAddresses: []*ipamv1.IPAddress{}, + IPAMAddresses: []*ipamv1beta1.IPAddress{}, MACAddress: networkStatus[devIdx].MACAddr, NetworkSpecGateway4: networkSpecDevice.Gateway4, NetworkSpecGateway6: networkSpecDevice.Gateway6, @@ -161,7 +161,7 @@ func buildIPAMDeviceConfigs(ctx context.Context, vmCtx capvcontext.VMContext, ne continue } - ipAddr := &ipamv1.IPAddress{} + ipAddr := &ipamv1beta1.IPAddress{} ipAddrKey := apitypes.NamespacedName{ Namespace: vmCtx.VSphereVM.Namespace, Name: ipAddrName, @@ -185,10 +185,10 @@ func buildIPAMDeviceConfigs(ctx context.Context, vmCtx capvcontext.VMContext, ne } // getIPAddrClaim fetches an IPAddressClaim from the api with the given name. 
-func getIPAddrClaim(ctx context.Context, vmCtx capvcontext.VMContext, ipAddrClaimName string) (*ipamv1.IPAddressClaim, error) { +func getIPAddrClaim(ctx context.Context, vmCtx capvcontext.VMContext, ipAddrClaimName string) (*ipamv1beta1.IPAddressClaim, error) { log := ctrl.LoggerFrom(ctx) - ipAddrClaim := &ipamv1.IPAddressClaim{} + ipAddrClaim := &ipamv1beta1.IPAddressClaim{} ipAddrClaimKey := apitypes.NamespacedName{ Namespace: vmCtx.VSphereVM.Namespace, Name: ipAddrClaimName, diff --git a/pkg/services/govmomi/ipam/status_test.go b/pkg/services/govmomi/ipam/status_test.go index 28e3777fa9..985b448eec 100644 --- a/pkg/services/govmomi/ipam/status_test.go +++ b/pkg/services/govmomi/ipam/status_test.go @@ -25,7 +25,7 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" apitypes "k8s.io/apimachinery/pkg/types" - ipamv1 "sigs.k8s.io/cluster-api/exp/ipam/api/v1beta1" + ipamv1beta1 "sigs.k8s.io/cluster-api/api/ipam/v1beta1" infrav1 "sigs.k8s.io/cluster-api-provider-vsphere/apis/v1beta1" capvcontext "sigs.k8s.io/cluster-api-provider-vsphere/pkg/context" @@ -43,8 +43,8 @@ func Test_buildIPAMDeviceConfigs(t *testing.T) { vmCtx capvcontext.VMContext ctx context.Context networkStatus []infrav1.NetworkStatus - claim1, claim2, claim3 *ipamv1.IPAddressClaim - address1, address2, address3 *ipamv1.IPAddress + claim1, claim2, claim3 *ipamv1beta1.IPAddressClaim + address1, address2, address3 *ipamv1beta1.IPAddress g *gomega.WithT ) @@ -58,41 +58,41 @@ func Test_buildIPAMDeviceConfigs(t *testing.T) { g = gomega.NewWithT(t) namespace := "my-namespace" - claim1 = &ipamv1.IPAddressClaim{ + claim1 = &ipamv1beta1.IPAddressClaim{ ObjectMeta: metav1.ObjectMeta{ Name: "vsphereVM1-0-0", Namespace: namespace, }, } - claim2 = &ipamv1.IPAddressClaim{ + claim2 = &ipamv1beta1.IPAddressClaim{ ObjectMeta: metav1.ObjectMeta{ Name: "vsphereVM1-0-1", Namespace: namespace, }, } - claim3 = &ipamv1.IPAddressClaim{ + claim3 = &ipamv1beta1.IPAddressClaim{ ObjectMeta: metav1.ObjectMeta{ Name: "vsphereVM1-0-2", Namespace: namespace, }, } - address1 = &ipamv1.IPAddress{ + address1 = &ipamv1beta1.IPAddress{ ObjectMeta: metav1.ObjectMeta{ Name: "vsphereVM1-0-0-address0", Namespace: namespace, }, } - address2 = &ipamv1.IPAddress{ + address2 = &ipamv1beta1.IPAddress{ ObjectMeta: metav1.ObjectMeta{ Name: "vsphereVM1-0-1-address1", Namespace: namespace, }, } - address3 = &ipamv1.IPAddress{ + address3 = &ipamv1beta1.IPAddress{ ObjectMeta: metav1.ObjectMeta{ Name: "vsphereVM1-0-2-address2", Namespace: namespace, @@ -149,7 +149,7 @@ func Test_buildIPAMDeviceConfigs(t *testing.T) { // Simulate IP provider reconciling one claim g.Expect(vmCtx.Client.Create(ctx, address3)).NotTo(gomega.HaveOccurred()) - ipAddrClaim := &ipamv1.IPAddressClaim{} + ipAddrClaim := &ipamv1beta1.IPAddressClaim{} ipAddrClaimKey := apitypes.NamespacedName{ Namespace: vmCtx.VSphereVM.Namespace, Name: "vsphereVM1-0-2", @@ -227,8 +227,8 @@ func Test_BuildState(t *testing.T) { ctx context.Context vmCtx capvcontext.VMContext networkStatus []infrav1.NetworkStatus - claim1, claim2, claim3 *ipamv1.IPAddressClaim - address1, address2, address3 *ipamv1.IPAddress + claim1, claim2, claim3 *ipamv1beta1.IPAddressClaim + address1, address2, address3 *ipamv1beta1.IPAddress g *gomega.WithT ) type nameservers struct { @@ -258,56 +258,56 @@ func Test_BuildState(t *testing.T) { g = gomega.NewWithT(t) - claim1 = &ipamv1.IPAddressClaim{ + claim1 = &ipamv1beta1.IPAddressClaim{ ObjectMeta: metav1.ObjectMeta{ Name: "vsphereVM1-0-0", Namespace: "my-namespace", }, 
} - claim2 = &ipamv1.IPAddressClaim{ + claim2 = &ipamv1beta1.IPAddressClaim{ ObjectMeta: metav1.ObjectMeta{ Name: "vsphereVM1-0-1", Namespace: "my-namespace", }, } - claim3 = &ipamv1.IPAddressClaim{ + claim3 = &ipamv1beta1.IPAddressClaim{ ObjectMeta: metav1.ObjectMeta{ Name: "vsphereVM1-0-2", Namespace: "my-namespace", }, } - address1 = &ipamv1.IPAddress{ + address1 = &ipamv1beta1.IPAddress{ ObjectMeta: metav1.ObjectMeta{ Name: "vsphereVM1-0-0-address0", Namespace: "my-namespace", }, - Spec: ipamv1.IPAddressSpec{ + Spec: ipamv1beta1.IPAddressSpec{ Address: "10.0.0.50", Prefix: 24, Gateway: "10.0.0.1", }, } - address2 = &ipamv1.IPAddress{ + address2 = &ipamv1beta1.IPAddress{ ObjectMeta: metav1.ObjectMeta{ Name: "vsphereVM1-0-1-address1", Namespace: "my-namespace", }, - Spec: ipamv1.IPAddressSpec{ + Spec: ipamv1beta1.IPAddressSpec{ Address: "10.0.1.50", Prefix: 30, Gateway: "10.0.0.1", }, } - address3 = &ipamv1.IPAddress{ + address3 = &ipamv1beta1.IPAddress{ ObjectMeta: metav1.ObjectMeta{ Name: "vsphereVM1-0-2-address2", Namespace: "my-namespace", }, - Spec: ipamv1.IPAddressSpec{ + Spec: ipamv1beta1.IPAddressSpec{ Address: "fe80::cccc:12", Prefix: 64, Gateway: "fe80::cccc:1", @@ -364,7 +364,7 @@ func Test_BuildState(t *testing.T) { // Simulate IP provider reconciling one claim g.Expect(vmCtx.Client.Create(ctx, address3)).NotTo(gomega.HaveOccurred()) - ipAddrClaim := &ipamv1.IPAddressClaim{} + ipAddrClaim := &ipamv1beta1.IPAddressClaim{} ipAddrClaimKey := apitypes.NamespacedName{ Namespace: vmCtx.VSphereVM.Namespace, Name: "vsphereVM1-0-2", @@ -443,18 +443,18 @@ func Test_BuildState(t *testing.T) { devMAC0 := "0:0:0:0:a" devMAC1 := "0:0:0:0:b" - claim := &ipamv1.IPAddressClaim{ + claim := &ipamv1beta1.IPAddressClaim{ ObjectMeta: metav1.ObjectMeta{ Name: "vsphereVM1-1-0", Namespace: "my-namespace", }, } - address := &ipamv1.IPAddress{ + address := &ipamv1beta1.IPAddress{ ObjectMeta: metav1.ObjectMeta{ Name: "vsphereVM1-1-0-address", Namespace: "my-namespace", }, - Spec: ipamv1.IPAddressSpec{ + Spec: ipamv1beta1.IPAddressSpec{ Address: "10.0.0.50", Prefix: 24, Gateway: "10.0.0.1", @@ -513,7 +513,7 @@ func Test_BuildState(t *testing.T) { // Simulate IP provider reconciling one claim g.Expect(vmCtx.Client.Create(ctx, address)).NotTo(gomega.HaveOccurred()) - ipAddrClaim := &ipamv1.IPAddressClaim{} + ipAddrClaim := &ipamv1beta1.IPAddressClaim{} ipAddrClaimKey := apitypes.NamespacedName{ Namespace: vmCtx.VSphereVM.Namespace, Name: "vsphereVM1-1-0", @@ -562,72 +562,72 @@ func Test_BuildState(t *testing.T) { beforeWithClaimsAndAddressCreated := func() { before() - claim1 = &ipamv1.IPAddressClaim{ + claim1 = &ipamv1beta1.IPAddressClaim{ ObjectMeta: metav1.ObjectMeta{ Name: "vsphereVM1-0-0", Namespace: "my-namespace", }, - Status: ipamv1.IPAddressClaimStatus{ + Status: ipamv1beta1.IPAddressClaimStatus{ AddressRef: corev1.LocalObjectReference{ Name: "vsphereVM1-0-0", }, }, } - claim2 = &ipamv1.IPAddressClaim{ + claim2 = &ipamv1beta1.IPAddressClaim{ ObjectMeta: metav1.ObjectMeta{ Name: "vsphereVM1-0-1", Namespace: "my-namespace", }, - Status: ipamv1.IPAddressClaimStatus{ + Status: ipamv1beta1.IPAddressClaimStatus{ AddressRef: corev1.LocalObjectReference{ Name: "vsphereVM1-0-1", }, }, } - claim3 = &ipamv1.IPAddressClaim{ + claim3 = &ipamv1beta1.IPAddressClaim{ ObjectMeta: metav1.ObjectMeta{ Name: "vsphereVM1-1-0", Namespace: "my-namespace", }, - Status: ipamv1.IPAddressClaimStatus{ + Status: ipamv1beta1.IPAddressClaimStatus{ AddressRef: corev1.LocalObjectReference{ Name: "vsphereVM1-1-0", }, }, } - 
address1 = &ipamv1.IPAddress{ + address1 = &ipamv1beta1.IPAddress{ ObjectMeta: metav1.ObjectMeta{ Name: "vsphereVM1-0-0", Namespace: "my-namespace", }, - Spec: ipamv1.IPAddressSpec{ + Spec: ipamv1beta1.IPAddressSpec{ Address: "10.0.1.50", Prefix: 24, Gateway: "10.0.0.1", }, } - address2 = &ipamv1.IPAddress{ + address2 = &ipamv1beta1.IPAddress{ ObjectMeta: metav1.ObjectMeta{ Name: "vsphereVM1-0-1", Namespace: "my-namespace", }, - Spec: ipamv1.IPAddressSpec{ + Spec: ipamv1beta1.IPAddressSpec{ Address: "10.0.1.51", Prefix: 24, Gateway: "10.0.0.1", }, } - address3 = &ipamv1.IPAddress{ + address3 = &ipamv1beta1.IPAddress{ ObjectMeta: metav1.ObjectMeta{ Name: "vsphereVM1-1-0", Namespace: "my-namespace", }, - Spec: ipamv1.IPAddressSpec{ + Spec: ipamv1beta1.IPAddressSpec{ Address: "11.0.1.50", Prefix: 24, Gateway: "11.0.0.1", diff --git a/pkg/services/govmomi/power.go b/pkg/services/govmomi/power.go index 8170c71aad..088d8ea9d3 100644 --- a/pkg/services/govmomi/power.go +++ b/pkg/services/govmomi/power.go @@ -25,9 +25,9 @@ import ( "github.com/vmware/govmomi/vim25/mo" "github.com/vmware/govmomi/vim25/types" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - "sigs.k8s.io/cluster-api/util/conditions" - v1beta2conditions "sigs.k8s.io/cluster-api/util/conditions/v1beta2" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + deprecatedconditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" + deprecatedv1beta2conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions/v1beta2" "sigs.k8s.io/controller-runtime/pkg/client" infrav1 "sigs.k8s.io/cluster-api-provider-vsphere/apis/v1beta1" @@ -52,7 +52,7 @@ func (vms *VMService) getPowerState(ctx context.Context, virtualMachineCtx *virt } func (vms *VMService) isSoftPowerOffTimeoutExceeded(vm *infrav1.VSphereVM) bool { - if !conditions.Has(vm, infrav1.GuestSoftPowerOffSucceededCondition) { + if !deprecatedconditions.Has(vm, infrav1.GuestSoftPowerOffSucceededCondition) { // The SoftPowerOff never got triggered, so it can't be timed out yet. return false } @@ -62,7 +62,7 @@ func (vms *VMService) isSoftPowerOffTimeoutExceeded(vm *infrav1.VSphereVM) bool return false } now := time.Now() - timeSoftPowerOff := conditions.GetLastTransitionTime(vm, infrav1.GuestSoftPowerOffSucceededCondition) + timeSoftPowerOff := deprecatedconditions.GetLastTransitionTime(vm, infrav1.GuestSoftPowerOffSucceededCondition) diff := now.Sub(timeSoftPowerOff.Time) var timeout time.Duration if vm.Spec.GuestSoftPowerOffTimeout != nil { @@ -81,7 +81,7 @@ func (vms *VMService) triggerSoftPowerOff(ctx context.Context, virtualMachineCtx return false, nil } - if conditions.Has(virtualMachineCtx.VSphereVM, infrav1.GuestSoftPowerOffSucceededCondition) { + if deprecatedconditions.Has(virtualMachineCtx.VSphereVM, infrav1.GuestSoftPowerOffSucceededCondition) { // soft power off operation has been triggered. 
if virtualMachineCtx.VSphereVM.Spec.PowerOffMode == infrav1.VirtualMachinePowerOpModeSoft { return true, nil @@ -102,9 +102,9 @@ func (vms *VMService) triggerSoftPowerOff(ctx context.Context, virtualMachineCtx return false, nil } - conditions.MarkFalse(virtualMachineCtx.VSphereVM, infrav1.GuestSoftPowerOffSucceededCondition, infrav1.GuestSoftPowerOffFailedReason, clusterv1.ConditionSeverityWarning, + deprecatedconditions.MarkFalse(virtualMachineCtx.VSphereVM, infrav1.GuestSoftPowerOffSucceededCondition, infrav1.GuestSoftPowerOffFailedReason, clusterv1beta1.ConditionSeverityWarning, "VMware Tools not installed on VM %s", client.ObjectKeyFromObject(virtualMachineCtx.VSphereVM)) - v1beta2conditions.Set(virtualMachineCtx.VSphereVM, metav1.Condition{ + deprecatedv1beta2conditions.Set(virtualMachineCtx.VSphereVM, metav1.Condition{ Type: infrav1.VSphereVMGuestSoftPowerOffSucceededV1Beta2Condition, Status: metav1.ConditionFalse, Reason: infrav1.VSphereVMGuestSoftPowerOffFailedV1Beta2Reason, @@ -125,9 +125,9 @@ func (vms *VMService) triggerSoftPowerOff(ctx context.Context, virtualMachineCtx return false, nil } - conditions.MarkFalse(virtualMachineCtx.VSphereVM, infrav1.GuestSoftPowerOffSucceededCondition, infrav1.GuestSoftPowerOffFailedReason, clusterv1.ConditionSeverityWarning, + deprecatedconditions.MarkFalse(virtualMachineCtx.VSphereVM, infrav1.GuestSoftPowerOffSucceededCondition, infrav1.GuestSoftPowerOffFailedReason, clusterv1beta1.ConditionSeverityWarning, "unable to trigger soft power off because guest state change is not supported on VM %s.", client.ObjectKeyFromObject(virtualMachineCtx.VSphereVM)) - v1beta2conditions.Set(virtualMachineCtx.VSphereVM, metav1.Condition{ + deprecatedv1beta2conditions.Set(virtualMachineCtx.VSphereVM, metav1.Condition{ Type: infrav1.VSphereVMGuestSoftPowerOffSucceededV1Beta2Condition, Status: metav1.ConditionFalse, Reason: infrav1.VSphereVMGuestSoftPowerOffFailedV1Beta2Reason, @@ -142,9 +142,9 @@ func (vms *VMService) triggerSoftPowerOff(ctx context.Context, virtualMachineCtx return false, err } - conditions.MarkFalse(virtualMachineCtx.VSphereVM, infrav1.GuestSoftPowerOffSucceededCondition, infrav1.GuestSoftPowerOffInProgressReason, clusterv1.ConditionSeverityInfo, + deprecatedconditions.MarkFalse(virtualMachineCtx.VSphereVM, infrav1.GuestSoftPowerOffSucceededCondition, infrav1.GuestSoftPowerOffInProgressReason, clusterv1beta1.ConditionSeverityInfo, "guest soft power off initiated on VM %s", client.ObjectKeyFromObject(virtualMachineCtx.VSphereVM)) - v1beta2conditions.Set(virtualMachineCtx.VSphereVM, metav1.Condition{ + deprecatedv1beta2conditions.Set(virtualMachineCtx.VSphereVM, metav1.Condition{ Type: infrav1.VSphereVMGuestSoftPowerOffSucceededV1Beta2Condition, Status: metav1.ConditionFalse, Reason: infrav1.VSphereVMGuestSoftPowerOffInProgressV1Beta2Reason, diff --git a/pkg/services/govmomi/power_test.go b/pkg/services/govmomi/power_test.go index 76fb90a384..e4572c0af9 100644 --- a/pkg/services/govmomi/power_test.go +++ b/pkg/services/govmomi/power_test.go @@ -26,7 +26,7 @@ import ( "github.com/vmware/govmomi/simulator" "github.com/vmware/govmomi/vim25" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" "sigs.k8s.io/controller-runtime/pkg/client/fake" infrav1 "sigs.k8s.io/cluster-api-provider-vsphere/apis/v1beta1" @@ -64,7 +64,7 @@ func TestIsSoftPowerOffTimeoutExceeded(t *testing.T) { GuestSoftPowerOffTimeout: nil, }, Status: infrav1.VSphereVMStatus{ - 
Conditions: []clusterv1.Condition{ + Conditions: []clusterv1beta1.Condition{ { Type: infrav1.GuestSoftPowerOffSucceededCondition, Status: infrav1.GuestSoftPowerOffInProgressReason, @@ -90,7 +90,7 @@ func TestIsSoftPowerOffTimeoutExceeded(t *testing.T) { GuestSoftPowerOffTimeout: &metav1.Duration{Duration: 0}, }, Status: infrav1.VSphereVMStatus{ - Conditions: []clusterv1.Condition{ + Conditions: []clusterv1beta1.Condition{ { Type: infrav1.GuestSoftPowerOffSucceededCondition, Status: infrav1.GuestSoftPowerOffInProgressReason, @@ -116,7 +116,7 @@ func TestIsSoftPowerOffTimeoutExceeded(t *testing.T) { GuestSoftPowerOffTimeout: &metav1.Duration{Duration: time.Minute}, }, Status: infrav1.VSphereVMStatus{ - Conditions: []clusterv1.Condition{ + Conditions: []clusterv1beta1.Condition{ { Type: infrav1.GuestSoftPowerOffSucceededCondition, Status: infrav1.GuestSoftPowerOffInProgressReason, @@ -193,7 +193,7 @@ func TestTriggerSoftPowerOff(t *testing.T) { GuestSoftPowerOffTimeout: nil, }, Status: infrav1.VSphereVMStatus{ - Conditions: []clusterv1.Condition{ + Conditions: []clusterv1beta1.Condition{ { Type: infrav1.GuestSoftPowerOffSucceededCondition, Status: infrav1.GuestSoftPowerOffInProgressReason, @@ -230,7 +230,7 @@ func TestTriggerSoftPowerOff(t *testing.T) { GuestSoftPowerOffTimeout: &metav1.Duration{Duration: 3 * time.Minute}, }, Status: infrav1.VSphereVMStatus{ - Conditions: []clusterv1.Condition{ + Conditions: []clusterv1beta1.Condition{ { Type: infrav1.GuestSoftPowerOffSucceededCondition, Status: infrav1.GuestSoftPowerOffInProgressReason, @@ -267,7 +267,7 @@ func TestTriggerSoftPowerOff(t *testing.T) { GuestSoftPowerOffTimeout: &metav1.Duration{Duration: 1 * time.Minute}, }, Status: infrav1.VSphereVMStatus{ - Conditions: []clusterv1.Condition{ + Conditions: []clusterv1beta1.Condition{ { Type: infrav1.GuestSoftPowerOffSucceededCondition, Status: infrav1.GuestSoftPowerOffInProgressReason, @@ -332,7 +332,7 @@ func TestTriggerSoftPowerOff(t *testing.T) { GuestSoftPowerOffTimeout: &metav1.Duration{Duration: 1 * time.Minute}, }, Status: infrav1.VSphereVMStatus{ - Conditions: []clusterv1.Condition{ + Conditions: []clusterv1beta1.Condition{ { Type: infrav1.GuestSoftPowerOffSucceededCondition, Status: infrav1.GuestSoftPowerOffInProgressReason, diff --git a/pkg/services/govmomi/service.go b/pkg/services/govmomi/service.go index 6cb7793c14..7e7f9304f7 100644 --- a/pkg/services/govmomi/service.go +++ b/pkg/services/govmomi/service.go @@ -34,10 +34,10 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" apitypes "k8s.io/apimachinery/pkg/types" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1" - "sigs.k8s.io/cluster-api/util/conditions" - v1beta2conditions "sigs.k8s.io/cluster-api/util/conditions/v1beta2" + bootstrapv1 "sigs.k8s.io/cluster-api/api/bootstrap/kubeadm/v1beta2" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + deprecatedconditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" + deprecatedv1beta2conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions/v1beta2" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/reconcile" @@ -90,8 +90,8 @@ func (vms *VMService) ReconcileVM(ctx context.Context, vmCtx *capvcontext.VMCont // but sometimes this error is transient, for instance, if the storage was temporarily disconnected but // later recovered, the machine will recover from this error. 
if wasNotFoundByBIOSUUID(err) { - conditions.MarkFalse(vmCtx.VSphereVM, infrav1.VMProvisionedCondition, infrav1.NotFoundByBIOSUUIDReason, clusterv1.ConditionSeverityWarning, err.Error()) - v1beta2conditions.Set(vmCtx.VSphereVM, metav1.Condition{ + deprecatedconditions.MarkFalse(vmCtx.VSphereVM, infrav1.VMProvisionedCondition, infrav1.NotFoundByBIOSUUIDReason, clusterv1beta1.ConditionSeverityWarning, "%v", err) + deprecatedv1beta2conditions.Set(vmCtx.VSphereVM, metav1.Condition{ Type: infrav1.VSphereVMVirtualMachineProvisionedV1Beta2Condition, Status: metav1.ConditionFalse, Reason: infrav1.VSphereVMVirtualMachineNotFoundByBIOSUUIDV1Beta2Reason, @@ -104,9 +104,9 @@ func (vms *VMService) ReconcileVM(ctx context.Context, vmCtx *capvcontext.VMCont // Otherwise, this is a new machine and the VM should be created. // NOTE: We are setting this condition only in case it does not exist, so we avoid to get flickering LastConditionTime // in case of cloning errors or powering on errors. - if !conditions.Has(vmCtx.VSphereVM, infrav1.VMProvisionedCondition) { - conditions.MarkFalse(vmCtx.VSphereVM, infrav1.VMProvisionedCondition, infrav1.CloningReason, clusterv1.ConditionSeverityInfo, "") - v1beta2conditions.Set(vmCtx.VSphereVM, metav1.Condition{ + if !deprecatedconditions.Has(vmCtx.VSphereVM, infrav1.VMProvisionedCondition) { + deprecatedconditions.MarkFalse(vmCtx.VSphereVM, infrav1.VMProvisionedCondition, infrav1.CloningReason, clusterv1beta1.ConditionSeverityInfo, "") + deprecatedv1beta2conditions.Set(vmCtx.VSphereVM, metav1.Condition{ Type: infrav1.VSphereVMVirtualMachineProvisionedV1Beta2Condition, Status: metav1.ConditionFalse, Reason: infrav1.VSphereVMVirtualMachineWaitingForCloneV1Beta2Reason, @@ -116,8 +116,8 @@ func (vms *VMService) ReconcileVM(ctx context.Context, vmCtx *capvcontext.VMCont // Get the bootstrap data. bootstrapData, format, err := vms.getBootstrapData(ctx, vmCtx) if err != nil { - conditions.MarkFalse(vmCtx.VSphereVM, infrav1.VMProvisionedCondition, infrav1.CloningFailedReason, clusterv1.ConditionSeverityWarning, err.Error()) - v1beta2conditions.Set(vmCtx.VSphereVM, metav1.Condition{ + deprecatedconditions.MarkFalse(vmCtx.VSphereVM, infrav1.VMProvisionedCondition, infrav1.CloningFailedReason, clusterv1beta1.ConditionSeverityWarning, "%v", err) + deprecatedv1beta2conditions.Set(vmCtx.VSphereVM, metav1.Condition{ Type: infrav1.VSphereVMVirtualMachineProvisionedV1Beta2Condition, Status: metav1.ConditionFalse, Reason: infrav1.VSphereVMVirtualMachineNotProvisionedV1Beta2Reason, @@ -129,8 +129,8 @@ func (vms *VMService) ReconcileVM(ctx context.Context, vmCtx *capvcontext.VMCont // Create the VM. 
err = createVM(ctx, vmCtx, bootstrapData, format) if err != nil { - conditions.MarkFalse(vmCtx.VSphereVM, infrav1.VMProvisionedCondition, infrav1.CloningFailedReason, clusterv1.ConditionSeverityWarning, err.Error()) - v1beta2conditions.Set(vmCtx.VSphereVM, metav1.Condition{ + deprecatedconditions.MarkFalse(vmCtx.VSphereVM, infrav1.VMProvisionedCondition, infrav1.CloningFailedReason, clusterv1beta1.ConditionSeverityWarning, "%v", err) + deprecatedv1beta2conditions.Set(vmCtx.VSphereVM, metav1.Condition{ Type: infrav1.VSphereVMVirtualMachineProvisionedV1Beta2Condition, Status: metav1.ConditionFalse, Reason: infrav1.VSphereVMVirtualMachineNotProvisionedV1Beta2Reason, @@ -197,8 +197,8 @@ func (vms *VMService) ReconcileVM(ctx context.Context, vmCtx *capvcontext.VMCont } if err := vms.reconcileTags(ctx, virtualMachineCtx); err != nil { - conditions.MarkFalse(vmCtx.VSphereVM, infrav1.VMProvisionedCondition, infrav1.TagsAttachmentFailedReason, clusterv1.ConditionSeverityError, err.Error()) - v1beta2conditions.Set(vmCtx.VSphereVM, metav1.Condition{ + deprecatedconditions.MarkFalse(vmCtx.VSphereVM, infrav1.VMProvisionedCondition, infrav1.TagsAttachmentFailedReason, clusterv1beta1.ConditionSeverityError, "%v", err) + deprecatedv1beta2conditions.Set(vmCtx.VSphereVM, metav1.Condition{ Type: infrav1.VSphereVMVirtualMachineProvisionedV1Beta2Condition, Status: metav1.ConditionFalse, Reason: infrav1.VSphereVMVirtualMachineNotProvisionedV1Beta2Reason, @@ -290,9 +290,9 @@ func (vms *VMService) DestroyVM(ctx context.Context, vmCtx *capvcontext.VMContex } // Only set the GuestPowerOffCondition to true when the guest shutdown has been initiated. - if conditions.Has(virtualMachineCtx.VSphereVM, infrav1.GuestSoftPowerOffSucceededCondition) { - conditions.MarkTrue(virtualMachineCtx.VSphereVM, infrav1.GuestSoftPowerOffSucceededCondition) - v1beta2conditions.Set(virtualMachineCtx.VSphereVM, metav1.Condition{ + if deprecatedconditions.Has(virtualMachineCtx.VSphereVM, infrav1.GuestSoftPowerOffSucceededCondition) { + deprecatedconditions.MarkTrue(virtualMachineCtx.VSphereVM, infrav1.GuestSoftPowerOffSucceededCondition) + deprecatedv1beta2conditions.Set(virtualMachineCtx.VSphereVM, metav1.Condition{ Type: infrav1.VSphereVMGuestSoftPowerOffSucceededV1Beta2Condition, Status: metav1.ConditionTrue, Reason: infrav1.VSphereVMGuestSoftPowerOffSucceededV1Beta2Reason, @@ -343,8 +343,8 @@ func (vms *VMService) reconcileIPAddresses(ctx context.Context, virtualMachineCt return false, err } if errors.Is(err, ipam.ErrWaitingForIPAddr) { - conditions.MarkFalse(virtualMachineCtx.VSphereVM, infrav1.VMProvisionedCondition, infrav1.WaitingForIPAddressReason, clusterv1.ConditionSeverityInfo, err.Error()) - v1beta2conditions.Set(virtualMachineCtx.VSphereVM, metav1.Condition{ + deprecatedconditions.MarkFalse(virtualMachineCtx.VSphereVM, infrav1.VMProvisionedCondition, infrav1.WaitingForIPAddressReason, clusterv1beta1.ConditionSeverityInfo, "%v", err) + deprecatedv1beta2conditions.Set(virtualMachineCtx.VSphereVM, metav1.Condition{ Type: infrav1.VSphereVMVirtualMachineProvisionedV1Beta2Condition, Status: metav1.ConditionFalse, Reason: infrav1.VSphereVMVirtualMachineWaitingForIPAddressV1Beta2Reason, @@ -397,8 +397,8 @@ func (vms *VMService) reconcilePowerState(ctx context.Context, virtualMachineCtx log.Info("Powering on VM") task, err := virtualMachineCtx.Obj.PowerOn(ctx) if err != nil { - conditions.MarkFalse(virtualMachineCtx.VSphereVM, infrav1.VMProvisionedCondition, infrav1.PoweringOnFailedReason, clusterv1.ConditionSeverityWarning, 
err.Error()) - v1beta2conditions.Set(virtualMachineCtx.VSphereVM, metav1.Condition{ + deprecatedconditions.MarkFalse(virtualMachineCtx.VSphereVM, infrav1.VMProvisionedCondition, infrav1.PoweringOnFailedReason, clusterv1beta1.ConditionSeverityWarning, "%v", err) + deprecatedv1beta2conditions.Set(virtualMachineCtx.VSphereVM, metav1.Condition{ Type: infrav1.VSphereVMVirtualMachineProvisionedV1Beta2Condition, Status: metav1.ConditionFalse, Reason: infrav1.VSphereVMVirtualMachineNotProvisionedV1Beta2Reason, @@ -406,8 +406,8 @@ func (vms *VMService) reconcilePowerState(ctx context.Context, virtualMachineCtx }) return false, errors.Wrapf(err, "failed to trigger power on op for vm %s", virtualMachineCtx) } - conditions.MarkFalse(virtualMachineCtx.VSphereVM, infrav1.VMProvisionedCondition, infrav1.PoweringOnReason, clusterv1.ConditionSeverityInfo, "") - v1beta2conditions.Set(virtualMachineCtx.VSphereVM, metav1.Condition{ + deprecatedconditions.MarkFalse(virtualMachineCtx.VSphereVM, infrav1.VMProvisionedCondition, infrav1.PoweringOnReason, clusterv1beta1.ConditionSeverityInfo, "") + deprecatedv1beta2conditions.Set(virtualMachineCtx.VSphereVM, metav1.Condition{ Type: infrav1.VSphereVMVirtualMachineProvisionedV1Beta2Condition, Status: metav1.ConditionFalse, Reason: infrav1.VSphereVMVirtualMachinePoweringOnV1Beta2Reason, @@ -564,10 +564,10 @@ func (vms *VMService) reconcilePCIDevices(ctx context.Context, virtualMachineCtx } if len(specsToBeAdded) == 0 { - if conditions.Has(virtualMachineCtx.VSphereVM, infrav1.PCIDevicesDetachedCondition) { - conditions.Delete(virtualMachineCtx.VSphereVM, infrav1.PCIDevicesDetachedCondition) + if deprecatedconditions.Has(virtualMachineCtx.VSphereVM, infrav1.PCIDevicesDetachedCondition) { + deprecatedconditions.Delete(virtualMachineCtx.VSphereVM, infrav1.PCIDevicesDetachedCondition) - v1beta2conditions.Delete(virtualMachineCtx.VSphereVM, infrav1.VSphereVMPCIDevicesDetachedV1Beta2Condition) + deprecatedv1beta2conditions.Delete(virtualMachineCtx.VSphereVM, infrav1.VSphereVMPCIDevicesDetachedV1Beta2Condition) } log.V(5).Info("No new PCI devices to be added") return nil @@ -581,13 +581,13 @@ func (vms *VMService) reconcilePCIDevices(ctx context.Context, virtualMachineCtx // This would arise only when the PCI device is manually removed from // the VM post creation. 
log.Info("PCI device cannot be attached in powered on state") - conditions.MarkFalse(virtualMachineCtx.VSphereVM, + deprecatedconditions.MarkFalse(virtualMachineCtx.VSphereVM, infrav1.PCIDevicesDetachedCondition, infrav1.NotFoundReason, - clusterv1.ConditionSeverityWarning, + clusterv1beta1.ConditionSeverityWarning, "PCI devices removed after VM was powered on") - v1beta2conditions.Set(virtualMachineCtx.VSphereVM, metav1.Condition{ + deprecatedv1beta2conditions.Set(virtualMachineCtx.VSphereVM, metav1.Condition{ Type: infrav1.VSphereVMPCIDevicesDetachedV1Beta2Condition, Status: metav1.ConditionFalse, Reason: infrav1.VSphereVMPCIDevicesDetachedNotFoundV1Beta2Reason, diff --git a/pkg/services/govmomi/util.go b/pkg/services/govmomi/util.go index fb196ba855..19e2f543b4 100644 --- a/pkg/services/govmomi/util.go +++ b/pkg/services/govmomi/util.go @@ -28,9 +28,9 @@ import ( "github.com/vmware/govmomi/vim25/mo" "github.com/vmware/govmomi/vim25/types" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - "sigs.k8s.io/cluster-api/util/conditions" - v1beta2conditions "sigs.k8s.io/cluster-api/util/conditions/v1beta2" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + deprecatedconditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" + deprecatedv1beta2conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions/v1beta2" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/event" @@ -178,8 +178,8 @@ func checkAndRetryTask(ctx context.Context, vmCtx *capvcontext.VMContext, task * } log.Info("Task found: Task failed") - conditions.MarkFalse(vmCtx.VSphereVM, infrav1.VMProvisionedCondition, infrav1.TaskFailure, clusterv1.ConditionSeverityInfo, errorMessage) - v1beta2conditions.Set(vmCtx.VSphereVM, metav1.Condition{ + deprecatedconditions.MarkFalse(vmCtx.VSphereVM, infrav1.VMProvisionedCondition, infrav1.TaskFailure, clusterv1beta1.ConditionSeverityInfo, "%s", errorMessage) + deprecatedv1beta2conditions.Set(vmCtx.VSphereVM, metav1.Condition{ Type: infrav1.VSphereVMVirtualMachineProvisionedV1Beta2Condition, Status: metav1.ConditionFalse, Reason: infrav1.VSphereVMVirtualMachineTaskFailedV1Beta2Reason, diff --git a/pkg/services/govmomi/util_test.go b/pkg/services/govmomi/util_test.go index d9a122a873..981c44a24b 100644 --- a/pkg/services/govmomi/util_test.go +++ b/pkg/services/govmomi/util_test.go @@ -26,7 +26,7 @@ import ( "github.com/vmware/govmomi/vim25/mo" "github.com/vmware/govmomi/vim25/types" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "sigs.k8s.io/cluster-api/util/conditions" + deprecatedconditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" infrav1 "sigs.k8s.io/cluster-api-provider-vsphere/apis/v1beta1" capvcontext "sigs.k8s.io/cluster-api-provider-vsphere/pkg/context" @@ -104,7 +104,7 @@ func Test_ShouldRetryTask(t *testing.T) { reconciled, err := checkAndRetryTask(ctx, vmCtx, &task) g.Expect(err).NotTo(HaveOccurred()) g.Expect(reconciled).To(BeTrue()) - g.Expect(conditions.IsFalse(vmCtx.VSphereVM, infrav1.VMProvisionedCondition)).To(BeTrue()) + g.Expect(deprecatedconditions.IsFalse(vmCtx.VSphereVM, infrav1.VMProvisionedCondition)).To(BeTrue()) g.Expect(vmCtx.VSphereVM.Status.TaskRef).To(BeEmpty()) g.Expect(vmCtx.VSphereVM.Status.RetryAfter.IsZero()).To(BeTrue()) }) @@ -123,7 +123,7 @@ func Test_ShouldRetryTask(t *testing.T) { reconciled, err := checkAndRetryTask(ctx, vmCtx, &task) g.Expect(err).NotTo(HaveOccurred()) g.Expect(reconciled).To(BeTrue()) - 
g.Expect(conditions.IsFalse(vmCtx.VSphereVM, infrav1.VMProvisionedCondition)).To(BeTrue()) + g.Expect(deprecatedconditions.IsFalse(vmCtx.VSphereVM, infrav1.VMProvisionedCondition)).To(BeTrue()) g.Expect(vmCtx.VSphereVM.Status.RetryAfter.Unix()).To(BeNumerically("<=", metav1.Now().Add(1*time.Minute).Unix())) }) diff --git a/pkg/services/govmomi/vcenter/clone.go b/pkg/services/govmomi/vcenter/clone.go index 40d3174fb3..35dd8ca9a1 100644 --- a/pkg/services/govmomi/vcenter/clone.go +++ b/pkg/services/govmomi/vcenter/clone.go @@ -30,7 +30,7 @@ import ( "github.com/vmware/govmomi/vim25/mo" "github.com/vmware/govmomi/vim25/types" "k8s.io/utils/ptr" - bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1" + bootstrapv1 "sigs.k8s.io/cluster-api/api/bootstrap/kubeadm/v1beta2" ctrl "sigs.k8s.io/controller-runtime" infrav1 "sigs.k8s.io/cluster-api-provider-vsphere/apis/v1beta1" diff --git a/pkg/services/interfaces.go b/pkg/services/interfaces.go index df4685cddb..3cef3b5cc2 100644 --- a/pkg/services/interfaces.go +++ b/pkg/services/interfaces.go @@ -22,7 +22,8 @@ import ( vmoprv1 "github.com/vmware-tanzu/vm-operator/api/v1alpha2" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/reconcile" @@ -56,7 +57,7 @@ type VirtualMachineService interface { type ControlPlaneEndpointService interface { // ReconcileControlPlaneEndpointService manages the lifecycle of a // control plane endpoint managed by a vmoperator VirtualMachineService - ReconcileControlPlaneEndpointService(ctx context.Context, clusterCtx *vmware.ClusterContext, netProvider NetworkProvider) (*clusterv1.APIEndpoint, error) + ReconcileControlPlaneEndpointService(ctx context.Context, clusterCtx *vmware.ClusterContext, netProvider NetworkProvider) (*clusterv1beta1.APIEndpoint, error) } // ResourcePolicyService is a service for reconciling a VirtualMachineSetResourcePolicy for a cluster. 
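The dominant pattern in the hunks above swaps sigs.k8s.io/cluster-api/util/conditions (and its v1beta2 subpackage) for the copies under util/deprecated/v1beta1, and passes errors through a constant "%v" format string instead of err.Error(). A minimal sketch of the resulting call shape, assembled from the calls shown in pkg/services/govmomi/service.go; the wrapping function, its name, and the Message field are illustrative only and not part of the patch:

package example // illustrative package name, not part of the patch

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
	deprecatedconditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions"
	deprecatedv1beta2conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions/v1beta2"

	infrav1 "sigs.k8s.io/cluster-api-provider-vsphere/apis/v1beta1"
)

// markCloningFailed mirrors the migrated call shape: the v1beta1 condition is
// set through the deprecated helpers with a constant format string ("%v", err)
// rather than err.Error(), and the paired v1beta2 condition is set alongside it.
func markCloningFailed(vm *infrav1.VSphereVM, err error) {
	deprecatedconditions.MarkFalse(vm, infrav1.VMProvisionedCondition, infrav1.CloningFailedReason,
		clusterv1beta1.ConditionSeverityWarning, "%v", err)
	deprecatedv1beta2conditions.Set(vm, metav1.Condition{
		Type:    infrav1.VSphereVMVirtualMachineProvisionedV1Beta2Condition,
		Status:  metav1.ConditionFalse,
		Reason:  infrav1.VSphereVMVirtualMachineNotProvisionedV1Beta2Reason,
		Message: err.Error(),
	})
}

The same two-step shape, a deprecated v1beta1 condition plus a metav1.Condition for v1beta2, repeats throughout power.go, util.go, vimmachine.go, and the network providers in this section.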
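The IPAM hunks keep the v1beta1 IPAM types but import them from the relocated package, so the alias changes from ipamv1 to ipamv1beta1 and scheme registration follows suit (pkg/manager/manager.go, pkg/context/fake/fake_controller_manager_context.go). A small sketch of the new import and registration, assuming a standalone runtime scheme built purely for illustration:

package example // illustrative package name, not part of the patch

import (
	"k8s.io/apimachinery/pkg/runtime"
	ipamv1beta1 "sigs.k8s.io/cluster-api/api/ipam/v1beta1"
)

// newScheme registers the relocated (still v1beta1) IPAM API group so that
// IPAddress and IPAddressClaim objects can be read through a client, as the
// govmomi ipam service in this section does.
func newScheme() (*runtime.Scheme, error) {
	scheme := runtime.NewScheme()
	if err := ipamv1beta1.AddToScheme(scheme); err != nil {
		return nil, err
	}
	return scheme, nil
}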
diff --git a/pkg/services/network/dummy_provider.go b/pkg/services/network/dummy_provider.go index 5274a210a3..ad90de62fc 100644 --- a/pkg/services/network/dummy_provider.go +++ b/pkg/services/network/dummy_provider.go @@ -22,7 +22,7 @@ import ( vmoprv1 "github.com/vmware-tanzu/vm-operator/api/v1alpha2" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" - v1beta2conditions "sigs.k8s.io/cluster-api/util/conditions/v1beta2" + deprecatedv1beta2conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions/v1beta2" vmwarev1 "sigs.k8s.io/cluster-api-provider-vsphere/apis/vmware/v1beta1" "sigs.k8s.io/cluster-api-provider-vsphere/pkg/context/vmware" @@ -46,7 +46,7 @@ func (np *dummyNetworkProvider) SupportsVMReadinessProbe() bool { } func (np *dummyNetworkProvider) ProvisionClusterNetwork(_ context.Context, clusterCtx *vmware.ClusterContext) error { - v1beta2conditions.Set(clusterCtx.VSphereCluster, metav1.Condition{ + deprecatedv1beta2conditions.Set(clusterCtx.VSphereCluster, metav1.Condition{ Type: vmwarev1.VSphereClusterNetworkReadyV1Beta2Condition, Status: metav1.ConditionTrue, Reason: vmwarev1.VSphereClusterNetworkReadyV1Beta2Reason, diff --git a/pkg/services/network/netop_provider.go b/pkg/services/network/netop_provider.go index 13a6d93b9a..084de25110 100644 --- a/pkg/services/network/netop_provider.go +++ b/pkg/services/network/netop_provider.go @@ -25,8 +25,8 @@ import ( vmoprv1common "github.com/vmware-tanzu/vm-operator/api/v1alpha2/common" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" - "sigs.k8s.io/cluster-api/util/conditions" - v1beta2conditions "sigs.k8s.io/cluster-api/util/conditions/v1beta2" + deprecatedconditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" + deprecatedv1beta2conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions/v1beta2" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" @@ -57,8 +57,8 @@ func (np *netopNetworkProvider) SupportsVMReadinessProbe() bool { // ProvisionClusterNetwork marks the ClusterNetworkReadyCondition true. 
func (np *netopNetworkProvider) ProvisionClusterNetwork(_ context.Context, clusterCtx *vmware.ClusterContext) error { - conditions.MarkTrue(clusterCtx.VSphereCluster, vmwarev1.ClusterNetworkReadyCondition) - v1beta2conditions.Set(clusterCtx.VSphereCluster, metav1.Condition{ + deprecatedconditions.MarkTrue(clusterCtx.VSphereCluster, vmwarev1.ClusterNetworkReadyCondition) + deprecatedv1beta2conditions.Set(clusterCtx.VSphereCluster, metav1.Condition{ Type: vmwarev1.VSphereClusterNetworkReadyV1Beta2Condition, Status: metav1.ConditionTrue, Reason: vmwarev1.VSphereClusterNetworkReadyV1Beta2Reason, diff --git a/pkg/services/network/network_test.go b/pkg/services/network/network_test.go index ca424e0951..7847bcca25 100644 --- a/pkg/services/network/network_test.go +++ b/pkg/services/network/network_test.go @@ -31,8 +31,8 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" apitypes "k8s.io/apimachinery/pkg/types" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - "sigs.k8s.io/cluster-api/util/conditions" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" + deprecatedconditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" runtimeclient "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" @@ -59,7 +59,7 @@ func (m *MockNSXTNetworkProvider) ProvisionClusterNetwork(ctx context.Context, c if err != nil { // Check if the error contains the string "virtual network ready status" if strings.Contains(err.Error(), "virtual network ready status") { - conditions.MarkTrue(clusterCtx.VSphereCluster, vmwarev1.ClusterNetworkReadyCondition) + deprecatedconditions.MarkTrue(clusterCtx.VSphereCluster, vmwarev1.ClusterNetworkReadyCondition) return nil } // return the original error if it doesn't contain the specific string @@ -79,7 +79,7 @@ func (m *MockNSXTVpcNetworkProvider) ProvisionClusterNetwork(ctx context.Context if err != nil { // Check if the error contains the string "subnetset ready status" if strings.Contains(err.Error(), "subnetset ready status") { - conditions.MarkTrue(clusterCtx.VSphereCluster, vmwarev1.ClusterNetworkReadyCondition) + deprecatedconditions.MarkTrue(clusterCtx.VSphereCluster, vmwarev1.ClusterNetworkReadyCondition) return nil } // return the original error if it doesn't contain the specific string @@ -367,7 +367,7 @@ var _ = Describe("Network provider", func() { Expect(err).ToNot(HaveOccurred()) }) It("should succeed", func() { - Expect(conditions.IsTrue(clusterCtx.VSphereCluster, vmwarev1.ClusterNetworkReadyCondition)).To(BeTrue()) + Expect(deprecatedconditions.IsTrue(clusterCtx.VSphereCluster, vmwarev1.ClusterNetworkReadyCondition)).To(BeTrue()) }) }) @@ -401,7 +401,7 @@ var _ = Describe("Network provider", func() { annotations, err := np.GetVMServiceAnnotations(ctx, clusterCtx) Expect(err).ToNot(HaveOccurred()) Expect(annotations).To(HaveKeyWithValue("ncp.vmware.com/virtual-network-name", GetNSXTVirtualNetworkName(clusterCtx.VSphereCluster.Name))) - Expect(conditions.IsTrue(clusterCtx.VSphereCluster, vmwarev1.ClusterNetworkReadyCondition)).To(BeTrue()) + Expect(deprecatedconditions.IsTrue(clusterCtx.VSphereCluster, vmwarev1.ClusterNetworkReadyCondition)).To(BeTrue()) }) }) @@ -433,7 +433,7 @@ var _ = Describe("Network provider", func() { Expect(err).ToNot(HaveOccurred()) Expect(createdVNET.Spec.WhitelistSourceRanges).To(BeEmpty()) - Expect(conditions.IsTrue(clusterCtx.VSphereCluster, vmwarev1.ClusterNetworkReadyCondition)).To(BeTrue()) + 
Expect(deprecatedconditions.IsTrue(clusterCtx.VSphereCluster, vmwarev1.ClusterNetworkReadyCondition)).To(BeTrue()) }) }) @@ -460,7 +460,7 @@ var _ = Describe("Network provider", func() { Expect(err).ToNot(HaveOccurred()) Expect(createdVNET.Spec.WhitelistSourceRanges).To(Equal(fakeSNATIP + "/32")) - Expect(conditions.IsTrue(clusterCtx.VSphereCluster, vmwarev1.ClusterNetworkReadyCondition)).To(BeTrue()) + Expect(deprecatedconditions.IsTrue(clusterCtx.VSphereCluster, vmwarev1.ClusterNetworkReadyCondition)).To(BeTrue()) }) }) @@ -494,7 +494,7 @@ var _ = Describe("Network provider", func() { Expect(createdVNET.Spec.WhitelistSourceRanges).To(Equal(fakeSNATIP + "/32")) // err is not empty, but it is because vnetObj does not have status mocked in this test - Expect(conditions.IsTrue(clusterCtx.VSphereCluster, vmwarev1.ClusterNetworkReadyCondition)).To(BeTrue()) + Expect(deprecatedconditions.IsTrue(clusterCtx.VSphereCluster, vmwarev1.ClusterNetworkReadyCondition)).To(BeTrue()) }) }) @@ -524,7 +524,7 @@ var _ = Describe("Network provider", func() { Expect(createdVNET.Spec.WhitelistSourceRanges).To(BeEmpty()) // err is not empty, but it is because vnetObj does not have status mocked in this test - Expect(conditions.IsTrue(clusterCtx.VSphereCluster, vmwarev1.ClusterNetworkReadyCondition)).To(BeTrue()) + Expect(deprecatedconditions.IsTrue(clusterCtx.VSphereCluster, vmwarev1.ClusterNetworkReadyCondition)).To(BeTrue()) }) AfterEach(func() { @@ -562,7 +562,7 @@ var _ = Describe("Network provider", func() { expectedErrorMessage := fmt.Sprintf("virtual network ready status is: '%s' in cluster %s. reason: %s, message: %s", "False", apitypes.NamespacedName{Namespace: dummyNs, Name: dummyCluster}, testNetworkNotRealizedReason, testNetworkNotRealizedMessage) Expect(err).To(MatchError(expectedErrorMessage)) - Expect(conditions.IsFalse(clusterCtx.VSphereCluster, vmwarev1.ClusterNetworkReadyCondition)).To(BeTrue()) + Expect(deprecatedconditions.IsFalse(clusterCtx.VSphereCluster, vmwarev1.ClusterNetworkReadyCondition)).To(BeTrue()) }) It("should return error when vnet ready status is not set", func() { @@ -579,7 +579,7 @@ var _ = Describe("Network provider", func() { expectedErrorMessage := fmt.Sprintf("virtual network ready status in cluster %s has not been set", apitypes.NamespacedName{Namespace: dummyNs, Name: dummyCluster}) Expect(err).To(MatchError(expectedErrorMessage)) - Expect(conditions.IsFalse(clusterCtx.VSphereCluster, vmwarev1.ClusterNetworkReadyCondition)).To(BeTrue()) + Expect(deprecatedconditions.IsFalse(clusterCtx.VSphereCluster, vmwarev1.ClusterNetworkReadyCondition)).To(BeTrue()) }) }) @@ -727,7 +727,7 @@ var _ = Describe("Network provider", func() { expectedErrorMessage := fmt.Sprintf("subnetset ready status is: '%s' in cluster %s. 
reason: %s, message: %s", "False", apitypes.NamespacedName{Namespace: dummyNs, Name: dummyCluster}, testNetworkNotRealizedReason, testNetworkNotRealizedMessage) Expect(err).To(MatchError(expectedErrorMessage)) - Expect(conditions.IsFalse(clusterCtx.VSphereCluster, vmwarev1.ClusterNetworkReadyCondition)).To(BeTrue()) + Expect(deprecatedconditions.IsFalse(clusterCtx.VSphereCluster, vmwarev1.ClusterNetworkReadyCondition)).To(BeTrue()) }) It("should return error when subnetset ready status is not set", func() { @@ -745,7 +745,7 @@ var _ = Describe("Network provider", func() { err = np.VerifyNetworkStatus(ctx, clusterCtx, subnetsetObj) expectedErrorMessage := fmt.Sprintf("subnetset ready status in cluster %s has not been set", apitypes.NamespacedName{Namespace: dummyNs, Name: dummyCluster}) Expect(err).To(MatchError(expectedErrorMessage)) - Expect(conditions.IsFalse(clusterCtx.VSphereCluster, vmwarev1.ClusterNetworkReadyCondition)).To(BeTrue()) + Expect(deprecatedconditions.IsFalse(clusterCtx.VSphereCluster, vmwarev1.ClusterNetworkReadyCondition)).To(BeTrue()) }) }) }) diff --git a/pkg/services/network/nsxt_provider.go b/pkg/services/network/nsxt_provider.go index 0c77c8c2a1..37097a82c1 100644 --- a/pkg/services/network/nsxt_provider.go +++ b/pkg/services/network/nsxt_provider.go @@ -27,9 +27,9 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - "sigs.k8s.io/cluster-api/util/conditions" - v1beta2conditions "sigs.k8s.io/cluster-api/util/conditions/v1beta2" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + deprecatedconditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" + deprecatedv1beta2conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions/v1beta2" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" ctrlutil "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" @@ -78,8 +78,8 @@ func (np *nsxtNetworkProvider) verifyNSXTVirtualNetworkStatus(vspherecluster *vm } hasReadyCondition = true if condition.Status != "True" { - conditions.MarkFalse(vspherecluster, vmwarev1.ClusterNetworkReadyCondition, vmwarev1.ClusterNetworkProvisionFailedReason, clusterv1.ConditionSeverityWarning, condition.Message) - v1beta2conditions.Set(vspherecluster, metav1.Condition{ + deprecatedconditions.MarkFalse(vspherecluster, vmwarev1.ClusterNetworkReadyCondition, vmwarev1.ClusterNetworkProvisionFailedReason, clusterv1beta1.ConditionSeverityWarning, "%s", condition.Message) + deprecatedv1beta2conditions.Set(vspherecluster, metav1.Condition{ Type: vmwarev1.VSphereClusterNetworkReadyV1Beta2Condition, Status: metav1.ConditionFalse, Reason: vmwarev1.VSphereClusterNetworkNotReadyV1Beta2Reason, @@ -91,8 +91,8 @@ func (np *nsxtNetworkProvider) verifyNSXTVirtualNetworkStatus(vspherecluster *vm } if !hasReadyCondition { - conditions.MarkFalse(vspherecluster, vmwarev1.ClusterNetworkReadyCondition, vmwarev1.ClusterNetworkProvisionFailedReason, clusterv1.ConditionSeverityWarning, "No Ready status for virtual network") - v1beta2conditions.Set(vspherecluster, metav1.Condition{ + deprecatedconditions.MarkFalse(vspherecluster, vmwarev1.ClusterNetworkReadyCondition, vmwarev1.ClusterNetworkProvisionFailedReason, clusterv1beta1.ConditionSeverityWarning, "No Ready status for virtual network") + deprecatedv1beta2conditions.Set(vspherecluster, metav1.Condition{ Type: vmwarev1.VSphereClusterNetworkReadyV1Beta2Condition, Status: 
metav1.ConditionFalse, Reason: vmwarev1.VSphereClusterNetworkNotReadyV1Beta2Reason, @@ -101,8 +101,8 @@ func (np *nsxtNetworkProvider) verifyNSXTVirtualNetworkStatus(vspherecluster *vm return errors.Errorf("virtual network ready status in cluster %s has not been set", types.NamespacedName{Namespace: namespace, Name: clusterName}) } - conditions.MarkTrue(vspherecluster, vmwarev1.ClusterNetworkReadyCondition) - v1beta2conditions.Set(vspherecluster, metav1.Condition{ + deprecatedconditions.MarkTrue(vspherecluster, vmwarev1.ClusterNetworkReadyCondition) + deprecatedv1beta2conditions.Set(vspherecluster, metav1.Condition{ Type: vmwarev1.VSphereClusterNetworkReadyV1Beta2Condition, Status: metav1.ConditionTrue, Reason: vmwarev1.VSphereClusterNetworkReadyV1Beta2Reason, @@ -173,8 +173,8 @@ func (np *nsxtNetworkProvider) ProvisionClusterNetwork(ctx context.Context, clus return nil }) if err != nil { - conditions.MarkFalse(clusterCtx.VSphereCluster, vmwarev1.ClusterNetworkReadyCondition, vmwarev1.ClusterNetworkProvisionFailedReason, clusterv1.ConditionSeverityWarning, err.Error()) - v1beta2conditions.Set(clusterCtx.VSphereCluster, metav1.Condition{ + deprecatedconditions.MarkFalse(clusterCtx.VSphereCluster, vmwarev1.ClusterNetworkReadyCondition, vmwarev1.ClusterNetworkProvisionFailedReason, clusterv1beta1.ConditionSeverityWarning, "%v", err) + deprecatedv1beta2conditions.Set(clusterCtx.VSphereCluster, metav1.Condition{ Type: vmwarev1.VSphereClusterNetworkReadyV1Beta2Condition, Status: metav1.ConditionFalse, Reason: vmwarev1.VSphereClusterNetworkNotReadyV1Beta2Reason, diff --git a/pkg/services/network/nsxt_vpc_provider.go b/pkg/services/network/nsxt_vpc_provider.go index 3d871006b0..e818837127 100644 --- a/pkg/services/network/nsxt_vpc_provider.go +++ b/pkg/services/network/nsxt_vpc_provider.go @@ -29,9 +29,9 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" "k8s.io/klog/v2" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - "sigs.k8s.io/cluster-api/util/conditions" - v1beta2conditions "sigs.k8s.io/cluster-api/util/conditions/v1beta2" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + deprecatedconditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" + deprecatedv1beta2conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions/v1beta2" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" ctrlutil "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" @@ -78,8 +78,8 @@ func (vp *nsxtVPCNetworkProvider) verifyNsxtVpcSubnetSetStatus(vspherecluster *v } hasReadyCondition = true if condition.Status != corev1.ConditionTrue { - conditions.MarkFalse(vspherecluster, vmwarev1.ClusterNetworkReadyCondition, vmwarev1.ClusterNetworkProvisionFailedReason, clusterv1.ConditionSeverityWarning, condition.Message) - v1beta2conditions.Set(vspherecluster, metav1.Condition{ + deprecatedconditions.MarkFalse(vspherecluster, vmwarev1.ClusterNetworkReadyCondition, vmwarev1.ClusterNetworkProvisionFailedReason, clusterv1beta1.ConditionSeverityWarning, "%s", condition.Message) + deprecatedv1beta2conditions.Set(vspherecluster, metav1.Condition{ Type: vmwarev1.VSphereClusterNetworkReadyV1Beta2Condition, Status: metav1.ConditionFalse, Reason: vmwarev1.VSphereClusterNetworkNotReadyV1Beta2Reason, @@ -91,8 +91,8 @@ func (vp *nsxtVPCNetworkProvider) verifyNsxtVpcSubnetSetStatus(vspherecluster *v } if !hasReadyCondition { - conditions.MarkFalse(vspherecluster, vmwarev1.ClusterNetworkReadyCondition, 
vmwarev1.ClusterNetworkProvisionFailedReason, clusterv1.ConditionSeverityWarning, "No Ready status for SubnetSet") - v1beta2conditions.Set(vspherecluster, metav1.Condition{ + deprecatedconditions.MarkFalse(vspherecluster, vmwarev1.ClusterNetworkReadyCondition, vmwarev1.ClusterNetworkProvisionFailedReason, clusterv1beta1.ConditionSeverityWarning, "No Ready status for SubnetSet") + deprecatedv1beta2conditions.Set(vspherecluster, metav1.Condition{ Type: vmwarev1.VSphereClusterNetworkReadyV1Beta2Condition, Status: metav1.ConditionFalse, Reason: vmwarev1.VSphereClusterNetworkNotReadyV1Beta2Reason, @@ -101,8 +101,8 @@ func (vp *nsxtVPCNetworkProvider) verifyNsxtVpcSubnetSetStatus(vspherecluster *v return errors.Errorf("subnetset ready status in cluster %s has not been set", types.NamespacedName{Namespace: namespace, Name: clusterName}) } - conditions.MarkTrue(vspherecluster, vmwarev1.ClusterNetworkReadyCondition) - v1beta2conditions.Set(vspherecluster, metav1.Condition{ + deprecatedconditions.MarkTrue(vspherecluster, vmwarev1.ClusterNetworkReadyCondition) + deprecatedv1beta2conditions.Set(vspherecluster, metav1.Condition{ Type: vmwarev1.VSphereClusterNetworkReadyV1Beta2Condition, Status: metav1.ConditionTrue, Reason: vmwarev1.VSphereClusterNetworkReadyV1Beta2Reason, @@ -157,8 +157,8 @@ func (vp *nsxtVPCNetworkProvider) ProvisionClusterNetwork(ctx context.Context, c return nil }) if err != nil { - conditions.MarkFalse(clusterCtx.VSphereCluster, vmwarev1.ClusterNetworkReadyCondition, vmwarev1.ClusterNetworkProvisionFailedReason, clusterv1.ConditionSeverityWarning, err.Error()) - v1beta2conditions.Set(clusterCtx.VSphereCluster, metav1.Condition{ + deprecatedconditions.MarkFalse(clusterCtx.VSphereCluster, vmwarev1.ClusterNetworkReadyCondition, vmwarev1.ClusterNetworkProvisionFailedReason, clusterv1beta1.ConditionSeverityWarning, "%v", err) + deprecatedv1beta2conditions.Set(clusterCtx.VSphereCluster, metav1.Condition{ Type: vmwarev1.VSphereClusterNetworkReadyV1Beta2Condition, Status: metav1.ConditionFalse, Reason: vmwarev1.VSphereClusterNetworkNotReadyV1Beta2Reason, diff --git a/pkg/services/vimmachine.go b/pkg/services/vimmachine.go index 5270df9750..bc59793945 100644 --- a/pkg/services/vimmachine.go +++ b/pkg/services/vimmachine.go @@ -29,10 +29,11 @@ import ( "k8s.io/apimachinery/pkg/types" kerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/klog/v2" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" clusterutilv1 "sigs.k8s.io/cluster-api/util" - "sigs.k8s.io/cluster-api/util/conditions" - v1beta2conditions "sigs.k8s.io/cluster-api/util/conditions/v1beta2" + deprecatedconditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" + deprecatedv1beta2conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions/v1beta2" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" ctrlutil "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" @@ -117,9 +118,9 @@ func (v *VimMachineService) ReconcileDelete(ctx context.Context, machineCtx capv // VSphereMachine wraps a VMSphereVM, so we are mirroring status from the underlying VMSphereVM // in order to provide evidences about machine deletion. 
- conditions.SetMirror(vimMachineCtx.VSphereMachine, infrav1.VMProvisionedCondition, vm) - v1beta2conditions.SetMirrorCondition(vm, vimMachineCtx.VSphereMachine, infrav1.VSphereVMVirtualMachineProvisionedV1Beta2Condition, - v1beta2conditions.TargetConditionType(infrav1.VSphereMachineVirtualMachineProvisionedV1Beta2Condition)) + deprecatedconditions.SetMirror(vimMachineCtx.VSphereMachine, infrav1.VMProvisionedCondition, vm) + deprecatedv1beta2conditions.SetMirrorCondition(vm, vimMachineCtx.VSphereMachine, infrav1.VSphereVMVirtualMachineProvisionedV1Beta2Condition, + deprecatedv1beta2conditions.TargetConditionType(infrav1.VSphereMachineVirtualMachineProvisionedV1Beta2Condition)) return nil } @@ -167,9 +168,9 @@ func (v *VimMachineService) ReconcileNormal(ctx context.Context, machineCtx capv log.Info("Waiting for VSphereVM to become ready") // VSphereMachine wraps a VMSphereVM, so we are mirroring status from the underlying VMSphereVM // in order to provide evidences about machine provisioning while provisioning is actually happening. - conditions.SetMirror(vimMachineCtx.VSphereMachine, infrav1.VMProvisionedCondition, vm) - v1beta2conditions.SetMirrorCondition(vm, vimMachineCtx.VSphereMachine, infrav1.VSphereVMVirtualMachineProvisionedV1Beta2Condition, - v1beta2conditions.TargetConditionType(infrav1.VSphereMachineVirtualMachineProvisionedV1Beta2Condition)) + deprecatedconditions.SetMirror(vimMachineCtx.VSphereMachine, infrav1.VMProvisionedCondition, vm) + deprecatedv1beta2conditions.SetMirrorCondition(vm, vimMachineCtx.VSphereMachine, infrav1.VSphereVMVirtualMachineProvisionedV1Beta2Condition, + deprecatedv1beta2conditions.TargetConditionType(infrav1.VSphereMachineVirtualMachineProvisionedV1Beta2Condition)) return true, nil } @@ -186,8 +187,8 @@ func (v *VimMachineService) ReconcileNormal(ctx context.Context, machineCtx capv if err != nil { return false, errors.Wrapf(err, "unexpected error while reconciling network for %s", vimMachineCtx) } - conditions.MarkFalse(vimMachineCtx.VSphereMachine, infrav1.VMProvisionedCondition, infrav1.WaitingForNetworkAddressesReason, clusterv1.ConditionSeverityInfo, "") - v1beta2conditions.Set(vimMachineCtx.VSphereMachine, metav1.Condition{ + deprecatedconditions.MarkFalse(vimMachineCtx.VSphereMachine, infrav1.VMProvisionedCondition, infrav1.WaitingForNetworkAddressesReason, clusterv1beta1.ConditionSeverityInfo, "") + deprecatedv1beta2conditions.Set(vimMachineCtx.VSphereMachine, metav1.Condition{ Type: infrav1.VSphereMachineVirtualMachineProvisionedV1Beta2Condition, Status: metav1.ConditionFalse, Reason: infrav1.VSphereMachineVirtualMachineWaitingForNetworkAddressV1Beta2Reason, @@ -220,7 +221,7 @@ func (v *VimMachineService) GetHostInfo(ctx context.Context, machineCtx capvcont return "", err } - if conditions.IsTrue(vsphereVM, infrav1.VMProvisionedCondition) { + if deprecatedconditions.IsTrue(vsphereVM, infrav1.VMProvisionedCondition) { return vsphereVM.Status.Host, nil } log.V(4).Info("Returning empty host info as VMProvisioned condition is not set to true") @@ -297,15 +298,15 @@ func (v *VimMachineService) reconcileNetwork(ctx context.Context, vimMachineCtx vimMachineCtx.VSphereMachine.Status.Network = networkStatusList addresses := vm.Status.Addresses - machineAddresses := make([]clusterv1.MachineAddress, 0, len(addresses)) + machineAddresses := make([]clusterv1beta1.MachineAddress, 0, len(addresses)) for _, addr := range addresses { - machineAddresses = append(machineAddresses, clusterv1.MachineAddress{ - Type: clusterv1.MachineExternalIP, + 
machineAddresses = append(machineAddresses, clusterv1beta1.MachineAddress{ + Type: clusterv1beta1.MachineExternalIP, Address: addr, }) } - machineAddresses = append(machineAddresses, clusterv1.MachineAddress{ - Type: clusterv1.MachineInternalDNS, + machineAddresses = append(machineAddresses, clusterv1beta1.MachineAddress{ + Type: clusterv1beta1.MachineInternalDNS, Address: vm.GetName(), }) vimMachineCtx.VSphereMachine.Status.Addresses = machineAddresses diff --git a/pkg/services/vimmachine_test.go b/pkg/services/vimmachine_test.go index dcc1c7c9c4..4fc52635ba 100644 --- a/pkg/services/vimmachine_test.go +++ b/pkg/services/vimmachine_test.go @@ -25,8 +25,8 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/utils/ptr" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - "sigs.k8s.io/cluster-api/util/conditions" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + deprecatedconditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" ctrl "sigs.k8s.io/controller-runtime" ctrlclient "sigs.k8s.io/controller-runtime/pkg/client" @@ -382,7 +382,7 @@ func Test_VimMachineService_GetHostInfo(t *testing.T) { }, Status: infrav1.VSphereVMStatus{ Host: hostAddr, - Conditions: []clusterv1.Condition{ + Conditions: []clusterv1beta1.Condition{ { Type: infrav1.VMProvisionedCondition, Status: conditionStatus, @@ -427,7 +427,7 @@ func Test_VimMachineService_createOrPatchVSphereVM(t *testing.T) { }, Status: infrav1.VSphereVMStatus{ Host: hostAddr, - Conditions: []clusterv1.Condition{ + Conditions: []clusterv1beta1.Condition{ { Type: infrav1.VMProvisionedCondition, Status: conditionStatus, @@ -471,7 +471,7 @@ func Test_VimMachineService_createOrPatchVSphereVM(t *testing.T) { machineCtx := fake.NewMachineContext(ctx, fake.NewClusterContext(ctx, controllerManagerContext), controllerManagerContext) machineCtx.VSphereMachine.Spec.OS = infrav1.Windows machineCtx.Machine.SetName(fakeLongClusterName) - machineCtx.Machine.SetLabels(map[string]string{clusterv1.MachineControlPlaneLabel: "fake-control-plane"}) + machineCtx.Machine.SetLabels(map[string]string{clusterv1beta1.MachineControlPlaneLabel: "fake-control-plane"}) failureDomain := "zone-one" machineCtx.Machine.Spec.FailureDomain = &failureDomain vimMachineService := &VimMachineService{controllerManagerContext.Client} @@ -488,7 +488,7 @@ func Test_VimMachineService_createOrPatchVSphereVM(t *testing.T) { machineCtx := fake.NewMachineContext(ctx, fake.NewClusterContext(ctx, controllerManagerContext), controllerManagerContext) machineCtx.VSphereMachine.Spec.OS = infrav1.Linux machineCtx.Machine.SetName(fakeLongClusterName) - machineCtx.Machine.SetLabels(map[string]string{clusterv1.MachineControlPlaneLabel: "fake-control-plane"}) + machineCtx.Machine.SetLabels(map[string]string{clusterv1beta1.MachineControlPlaneLabel: "fake-control-plane"}) vimMachineService := &VimMachineService{controllerManagerContext.Client} vm, err := vimMachineService.createOrPatchVSphereVM(ctx, machineCtx, getVSphereVM(hostAddr, corev1.ConditionTrue)) @@ -512,7 +512,7 @@ func Test_VimMachineService_reconcileProviderID(t *testing.T) { }, Status: infrav1.VSphereVMStatus{ Host: hostAddr, - Conditions: []clusterv1.Condition{ + Conditions: []clusterv1beta1.Condition{ { Type: infrav1.VMProvisionedCondition, Status: conditionStatus, @@ -531,7 +531,7 @@ func Test_VimMachineService_reconcileProviderID(t *testing.T) { controllerManagerContext := fake.NewControllerManagerContext(vsphereVM) machineCtx := fake.NewMachineContext(ctx, 
fake.NewClusterContext(ctx, controllerManagerContext), controllerManagerContext) machineCtx.Machine.SetName(fakeLongClusterName) - machineCtx.Machine.SetLabels(map[string]string{clusterv1.MachineControlPlaneLabel: "fake-control-plane"}) + machineCtx.Machine.SetLabels(map[string]string{clusterv1beta1.MachineControlPlaneLabel: "fake-control-plane"}) vimMachineService := &VimMachineService{controllerManagerContext.Client} ok, err := vimMachineService.reconcileProviderID(ctx, machineCtx, vsphereVM) @@ -545,7 +545,7 @@ func Test_VimMachineService_reconcileProviderID(t *testing.T) { controllerManagerContext := fake.NewControllerManagerContext(vsphereVM) machineCtx := fake.NewMachineContext(ctx, fake.NewClusterContext(ctx, controllerManagerContext), controllerManagerContext) machineCtx.Machine.SetName(fakeLongClusterName) - machineCtx.Machine.SetLabels(map[string]string{clusterv1.MachineControlPlaneLabel: "fake-control-plane"}) + machineCtx.Machine.SetLabels(map[string]string{clusterv1beta1.MachineControlPlaneLabel: "fake-control-plane"}) vimMachineService := &VimMachineService{controllerManagerContext.Client} ok, err := vimMachineService.reconcileProviderID(ctx, machineCtx, vsphereVM) @@ -560,7 +560,7 @@ func Test_VimMachineService_reconcileProviderID(t *testing.T) { controllerManagerContext := fake.NewControllerManagerContext(vsphereVM) machineCtx := fake.NewMachineContext(ctx, fake.NewClusterContext(ctx, controllerManagerContext), controllerManagerContext) machineCtx.Machine.SetName(fakeLongClusterName) - machineCtx.Machine.SetLabels(map[string]string{clusterv1.MachineControlPlaneLabel: "fake-control-plane"}) + machineCtx.Machine.SetLabels(map[string]string{clusterv1beta1.MachineControlPlaneLabel: "fake-control-plane"}) vimMachineService := &VimMachineService{controllerManagerContext.Client} _, err := vimMachineService.reconcileProviderID(ctx, machineCtx, vsphereVM) @@ -585,7 +585,7 @@ func Test_VimMachineService_reconcileNetwork(t *testing.T) { Ready: conditionStatus == corev1.ConditionTrue, Addresses: addresses, Network: networkStatus, - Conditions: []clusterv1.Condition{ + Conditions: []clusterv1beta1.Condition{ { Type: infrav1.VMProvisionedCondition, Status: conditionStatus, @@ -609,14 +609,14 @@ func Test_VimMachineService_reconcileNetwork(t *testing.T) { controllerManagerContext := fake.NewControllerManagerContext(vsphereVM) machineCtx := fake.NewMachineContext(ctx, fake.NewClusterContext(ctx, controllerManagerContext), controllerManagerContext) machineCtx.Machine.SetName(fakeLongClusterName) - machineCtx.Machine.SetLabels(map[string]string{clusterv1.MachineControlPlaneLabel: "fake-control-plane"}) + machineCtx.Machine.SetLabels(map[string]string{clusterv1beta1.MachineControlPlaneLabel: "fake-control-plane"}) vimMachineService := &VimMachineService{controllerManagerContext.Client} ok, err := vimMachineService.reconcileNetwork(ctx, machineCtx, vsphereVM) g.Expect(err).NotTo(HaveOccurred()) g.Expect(ok).To(BeTrue()) - g.Expect(machineCtx.VSphereMachine.Status.Addresses).To(ContainElement(clusterv1.MachineAddress{ - Type: clusterv1.MachineInternalDNS, + g.Expect(machineCtx.VSphereMachine.Status.Addresses).To(ContainElement(clusterv1beta1.MachineAddress{ + Type: clusterv1beta1.MachineInternalDNS, Address: vsphereVM.Name, })) }) @@ -626,7 +626,7 @@ func Test_VimMachineService_reconcileNetwork(t *testing.T) { controllerManagerContext := fake.NewControllerManagerContext(vsphereVM) machineCtx := fake.NewMachineContext(ctx, fake.NewClusterContext(ctx, controllerManagerContext), 
controllerManagerContext) machineCtx.Machine.SetName(fakeLongClusterName) - machineCtx.Machine.SetLabels(map[string]string{clusterv1.MachineControlPlaneLabel: "fake-control-plane"}) + machineCtx.Machine.SetLabels(map[string]string{clusterv1beta1.MachineControlPlaneLabel: "fake-control-plane"}) vimMachineService := &VimMachineService{controllerManagerContext.Client} ok, err := vimMachineService.reconcileNetwork(ctx, machineCtx, vsphereVM) @@ -652,13 +652,13 @@ func Test_VimMachineService_ReconcileNormal(t *testing.T) { Ready: conditionStatus == corev1.ConditionTrue, Addresses: addresses, Network: networkStatus, - Conditions: []clusterv1.Condition{ + Conditions: []clusterv1beta1.Condition{ { Type: infrav1.VMProvisionedCondition, Status: conditionStatus, }, { - Type: clusterv1.ReadyCondition, + Type: clusterv1beta1.ReadyCondition, Status: conditionStatus, }, }, @@ -678,7 +678,7 @@ func Test_VimMachineService_ReconcileNormal(t *testing.T) { controllerManagerContext := fake.NewControllerManagerContext(vsphereVM) machineCtx := fake.NewMachineContext(ctx, fake.NewClusterContext(ctx, controllerManagerContext), controllerManagerContext) machineCtx.Machine.SetName(fakeLongClusterName) - machineCtx.Machine.SetLabels(map[string]string{clusterv1.MachineControlPlaneLabel: "fake-control-plane"}) + machineCtx.Machine.SetLabels(map[string]string{clusterv1beta1.MachineControlPlaneLabel: "fake-control-plane"}) vimMachineService := &VimMachineService{controllerManagerContext.Client} requeue, err := vimMachineService.ReconcileNormal(ctx, machineCtx) @@ -691,7 +691,7 @@ func Test_VimMachineService_ReconcileNormal(t *testing.T) { controllerManagerContext := fake.NewControllerManagerContext() machineCtx := fake.NewMachineContext(ctx, fake.NewClusterContext(ctx, controllerManagerContext), controllerManagerContext) machineCtx.Machine.SetName(fakeLongClusterName) - machineCtx.Machine.SetLabels(map[string]string{clusterv1.MachineControlPlaneLabel: "fake-control-plane"}) + machineCtx.Machine.SetLabels(map[string]string{clusterv1beta1.MachineControlPlaneLabel: "fake-control-plane"}) vimMachineService := &VimMachineService{controllerManagerContext.Client} requeue, err := vimMachineService.ReconcileNormal(ctx, machineCtx) @@ -706,7 +706,7 @@ func Test_VimMachineService_ReconcileNormal(t *testing.T) { controllerManagerContext := fake.NewControllerManagerContext(vsphereVM) machineCtx := fake.NewMachineContext(ctx, fake.NewClusterContext(ctx, controllerManagerContext), controllerManagerContext) machineCtx.Machine.SetName(fakeLongClusterName) - machineCtx.Machine.SetLabels(map[string]string{clusterv1.MachineControlPlaneLabel: "fake-control-plane"}) + machineCtx.Machine.SetLabels(map[string]string{clusterv1beta1.MachineControlPlaneLabel: "fake-control-plane"}) vimMachineService := &VimMachineService{controllerManagerContext.Client} _, err := vimMachineService.ReconcileNormal(ctx, machineCtx) @@ -719,7 +719,7 @@ func Test_VimMachineService_ReconcileNormal(t *testing.T) { controllerManagerContext := fake.NewControllerManagerContext(vsphereVM) machineCtx := fake.NewMachineContext(ctx, fake.NewClusterContext(ctx, controllerManagerContext), controllerManagerContext) machineCtx.Machine.SetName(fakeLongClusterName) - machineCtx.Machine.SetLabels(map[string]string{clusterv1.MachineControlPlaneLabel: "fake-control-plane"}) + machineCtx.Machine.SetLabels(map[string]string{clusterv1beta1.MachineControlPlaneLabel: "fake-control-plane"}) vimMachineService := &VimMachineService{controllerManagerContext.Client} requeue, err := 
vimMachineService.ReconcileNormal(ctx, machineCtx) @@ -732,7 +732,7 @@ func Test_VimMachineService_ReconcileNormal(t *testing.T) { controllerManagerContext := fake.NewControllerManagerContext(vsphereVM) machineCtx := fake.NewMachineContext(ctx, fake.NewClusterContext(ctx, controllerManagerContext), controllerManagerContext) machineCtx.Machine.SetName(fakeLongClusterName) - machineCtx.Machine.SetLabels(map[string]string{clusterv1.MachineControlPlaneLabel: "fake-control-plane"}) + machineCtx.Machine.SetLabels(map[string]string{clusterv1beta1.MachineControlPlaneLabel: "fake-control-plane"}) vimMachineService := &VimMachineService{controllerManagerContext.Client} requeue, err := vimMachineService.ReconcileNormal(ctx, machineCtx) @@ -755,13 +755,13 @@ func Test_VimMachineService_ReconcileDelete(t *testing.T) { }, Status: infrav1.VSphereVMStatus{ Host: hostAddr, - Conditions: []clusterv1.Condition{ + Conditions: []clusterv1beta1.Condition{ { Type: infrav1.VMProvisionedCondition, Status: conditionStatus, }, { - Type: clusterv1.ReadyCondition, + Type: clusterv1beta1.ReadyCondition, Status: conditionStatus, }, }, @@ -773,14 +773,14 @@ func Test_VimMachineService_ReconcileDelete(t *testing.T) { controllerManagerContext := fake.NewControllerManagerContext(vsphereVM) machineCtx := fake.NewMachineContext(ctx, fake.NewClusterContext(ctx, controllerManagerContext), controllerManagerContext) machineCtx.Machine.SetName(fakeLongClusterName) - machineCtx.Machine.SetLabels(map[string]string{clusterv1.MachineControlPlaneLabel: "fake-control-plane"}) + machineCtx.Machine.SetLabels(map[string]string{clusterv1beta1.MachineControlPlaneLabel: "fake-control-plane"}) vimMachineService := &VimMachineService{controllerManagerContext.Client} t.Run("deletes VSphereVM", func(t *testing.T) { g := NewWithT(t) err := vimMachineService.ReconcileDelete(ctx, machineCtx) g.Expect(err).NotTo(HaveOccurred()) - g.Expect(conditions.Get(machineCtx.VSphereMachine, infrav1.VMProvisionedCondition).Status).To(Equal(conditions.Get(vsphereVM, clusterv1.ReadyCondition).Status)) + g.Expect(deprecatedconditions.Get(machineCtx.VSphereMachine, infrav1.VMProvisionedCondition).Status).To(Equal(deprecatedconditions.Get(vsphereVM, clusterv1beta1.ReadyCondition).Status)) }) } @@ -820,7 +820,7 @@ func Test_VimMachineService_FetchVSphereMachine(t *testing.T) { controllerManagerContext := fake.NewControllerManagerContext(vsphereMachine) machineCtx := fake.NewMachineContext(ctx, fake.NewClusterContext(ctx, controllerManagerContext), controllerManagerContext) machineCtx.Machine.SetName(fakeLongClusterName) - machineCtx.Machine.SetLabels(map[string]string{clusterv1.MachineControlPlaneLabel: "fake-control-plane"}) + machineCtx.Machine.SetLabels(map[string]string{clusterv1beta1.MachineControlPlaneLabel: "fake-control-plane"}) vimMachineService := &VimMachineService{controllerManagerContext.Client} t.Run("fetches VSphereMachine successfully", func(t *testing.T) { @@ -857,7 +857,7 @@ func Test_VimMachineService_FetchVSphereCluster(t *testing.T) { controllerManagerContext := fake.NewControllerManagerContext(vsphereCluster) machineCtx := fake.NewMachineContext(ctx, fake.NewClusterContext(ctx, controllerManagerContext), controllerManagerContext) machineCtx.Machine.SetName(fakeLongClusterName) - machineCtx.Machine.SetLabels(map[string]string{clusterv1.MachineControlPlaneLabel: "fake-control-plane"}) + machineCtx.Machine.SetLabels(map[string]string{clusterv1beta1.MachineControlPlaneLabel: "fake-control-plane"}) vimMachineService := 
&VimMachineService{controllerManagerContext.Client} t.Run("fetches VSphereCluster successfully", func(t *testing.T) { @@ -881,7 +881,7 @@ func Test_VimMachineService_SyncFailureReason(t *testing.T) { }, Status: infrav1.VSphereVMStatus{ Host: hostAddr, - Conditions: []clusterv1.Condition{ + Conditions: []clusterv1beta1.Condition{ { Type: infrav1.VMProvisionedCondition, Status: conditionStatus, @@ -896,7 +896,7 @@ func Test_VimMachineService_SyncFailureReason(t *testing.T) { controllerManagerContext := fake.NewControllerManagerContext(vsphereVM) machineCtx := fake.NewMachineContext(ctx, fake.NewClusterContext(ctx, controllerManagerContext), controllerManagerContext) machineCtx.Machine.SetName(fakeLongClusterName) - machineCtx.Machine.SetLabels(map[string]string{clusterv1.MachineControlPlaneLabel: "fake-control-plane"}) + machineCtx.Machine.SetLabels(map[string]string{clusterv1beta1.MachineControlPlaneLabel: "fake-control-plane"}) vimMachineService := &VimMachineService{controllerManagerContext.Client} t.Run("syncs failure reason successfully", func(t *testing.T) { diff --git a/pkg/services/vmoperator/control_plane_endpoint.go b/pkg/services/vmoperator/control_plane_endpoint.go index c265566372..06a99ed445 100644 --- a/pkg/services/vmoperator/control_plane_endpoint.go +++ b/pkg/services/vmoperator/control_plane_endpoint.go @@ -25,9 +25,9 @@ import ( apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/klog/v2" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - "sigs.k8s.io/cluster-api/util/conditions" - v1beta2conditions "sigs.k8s.io/cluster-api/util/conditions/v1beta2" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + deprecatedconditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" + deprecatedv1beta2conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions/v1beta2" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" ctrlutil "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" @@ -67,7 +67,7 @@ type CPService struct { } // ReconcileControlPlaneEndpointService manages the lifecycle of a control plane endpoint managed by a vmoperator VirtualMachineService. 
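Note on the rewritten import blocks in this and the surrounding files: the former single clusterv1 alias is split in two, so that types still stored in provider status fields keep their v1beta1 form while the CAPI Cluster object itself moves to v1beta2. A sketch of the convention; the usage comments are illustrative only:

package services

import (
	clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" // Condition, MachineAddress, APIEndpoint, ConditionSeverity*
	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2"      // Cluster and other core v1beta2 types
)

var (
	_ clusterv1beta1.MachineAddress // e.g. VSphereMachine.Status.Addresses
	_ clusterv1beta1.Conditions     // e.g. VSphereVM.Status.Conditions
	_ *clusterv1.Cluster            // the CAPI Cluster read by the reconcilers
)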
-func (s *CPService) ReconcileControlPlaneEndpointService(ctx context.Context, clusterCtx *vmware.ClusterContext, netProvider services.NetworkProvider) (*clusterv1.APIEndpoint, error) { +func (s *CPService) ReconcileControlPlaneEndpointService(ctx context.Context, clusterCtx *vmware.ClusterContext, netProvider services.NetworkProvider) (*clusterv1beta1.APIEndpoint, error) { log := ctrl.LoggerFrom(ctx) log.V(4).Info("Reconciling control plane VirtualMachineService for cluster") @@ -80,8 +80,8 @@ func (s *CPService) ReconcileControlPlaneEndpointService(ctx context.Context, cl if err != nil { if !apierrors.IsNotFound(err) { err = errors.Wrapf(err, "failed to check if VirtualMachineService exists") - conditions.MarkFalse(clusterCtx.VSphereCluster, vmwarev1.LoadBalancerReadyCondition, vmwarev1.LoadBalancerCreationFailedReason, clusterv1.ConditionSeverityWarning, err.Error()) - v1beta2conditions.Set(clusterCtx.VSphereCluster, metav1.Condition{ + deprecatedconditions.MarkFalse(clusterCtx.VSphereCluster, vmwarev1.LoadBalancerReadyCondition, vmwarev1.LoadBalancerCreationFailedReason, clusterv1beta1.ConditionSeverityWarning, "%v", err) + deprecatedv1beta2conditions.Set(clusterCtx.VSphereCluster, metav1.Condition{ Type: vmwarev1.VSphereClusterLoadBalancerReadyV1Beta2Condition, Status: metav1.ConditionFalse, Reason: vmwarev1.VSphereClusterLoadBalancerNotReadyV1Beta2Reason, @@ -94,8 +94,8 @@ func (s *CPService) ReconcileControlPlaneEndpointService(ctx context.Context, cl annotations, err := netProvider.GetVMServiceAnnotations(ctx, clusterCtx) if err != nil { err = errors.Wrapf(err, "failed to get provider VirtualMachineService annotations") - conditions.MarkFalse(clusterCtx.VSphereCluster, vmwarev1.LoadBalancerReadyCondition, vmwarev1.LoadBalancerCreationFailedReason, clusterv1.ConditionSeverityWarning, err.Error()) - v1beta2conditions.Set(clusterCtx.VSphereCluster, metav1.Condition{ + deprecatedconditions.MarkFalse(clusterCtx.VSphereCluster, vmwarev1.LoadBalancerReadyCondition, vmwarev1.LoadBalancerCreationFailedReason, clusterv1beta1.ConditionSeverityWarning, "%v", err) + deprecatedv1beta2conditions.Set(clusterCtx.VSphereCluster, metav1.Condition{ Type: vmwarev1.VSphereClusterLoadBalancerReadyV1Beta2Condition, Status: metav1.ConditionFalse, Reason: vmwarev1.VSphereClusterLoadBalancerNotReadyV1Beta2Reason, @@ -107,8 +107,8 @@ func (s *CPService) ReconcileControlPlaneEndpointService(ctx context.Context, cl vmService, err = s.createVMControlPlaneService(ctx, clusterCtx, annotations) if err != nil { err = errors.Wrapf(err, "failed to create VirtualMachineService") - conditions.MarkFalse(clusterCtx.VSphereCluster, vmwarev1.LoadBalancerReadyCondition, vmwarev1.LoadBalancerCreationFailedReason, clusterv1.ConditionSeverityWarning, err.Error()) - v1beta2conditions.Set(clusterCtx.VSphereCluster, metav1.Condition{ + deprecatedconditions.MarkFalse(clusterCtx.VSphereCluster, vmwarev1.LoadBalancerReadyCondition, vmwarev1.LoadBalancerCreationFailedReason, clusterv1beta1.ConditionSeverityWarning, "%v", err) + deprecatedv1beta2conditions.Set(clusterCtx.VSphereCluster, metav1.Condition{ Type: vmwarev1.VSphereClusterLoadBalancerReadyV1Beta2Condition, Status: metav1.ConditionFalse, Reason: vmwarev1.VSphereClusterLoadBalancerNotReadyV1Beta2Reason, @@ -122,8 +122,8 @@ func (s *CPService) ReconcileControlPlaneEndpointService(ctx context.Context, cl vip, err := getVMServiceVIP(vmService) if err != nil { err = errors.Wrapf(err, "VirtualMachineService LB does not yet have VIP assigned") - 
conditions.MarkFalse(clusterCtx.VSphereCluster, vmwarev1.LoadBalancerReadyCondition, vmwarev1.WaitingForLoadBalancerIPReason, clusterv1.ConditionSeverityInfo, err.Error()) - v1beta2conditions.Set(clusterCtx.VSphereCluster, metav1.Condition{ + deprecatedconditions.MarkFalse(clusterCtx.VSphereCluster, vmwarev1.LoadBalancerReadyCondition, vmwarev1.WaitingForLoadBalancerIPReason, clusterv1beta1.ConditionSeverityInfo, "%v", err) + deprecatedv1beta2conditions.Set(clusterCtx.VSphereCluster, metav1.Condition{ Type: vmwarev1.VSphereClusterLoadBalancerReadyV1Beta2Condition, Status: metav1.ConditionFalse, Reason: vmwarev1.VSphereClusterLoadBalancerWaitingForIPV1Beta2Reason, @@ -135,8 +135,8 @@ func (s *CPService) ReconcileControlPlaneEndpointService(ctx context.Context, cl cpEndpoint, err := getAPIEndpointFromVIP(vmService, vip) if err != nil { err = errors.Wrapf(err, "VirtualMachineService LB does not have an apiserver endpoint") - conditions.MarkFalse(clusterCtx.VSphereCluster, vmwarev1.LoadBalancerReadyCondition, vmwarev1.WaitingForLoadBalancerIPReason, clusterv1.ConditionSeverityWarning, err.Error()) - v1beta2conditions.Set(clusterCtx.VSphereCluster, metav1.Condition{ + deprecatedconditions.MarkFalse(clusterCtx.VSphereCluster, vmwarev1.LoadBalancerReadyCondition, vmwarev1.WaitingForLoadBalancerIPReason, clusterv1beta1.ConditionSeverityWarning, "%v", err) + deprecatedv1beta2conditions.Set(clusterCtx.VSphereCluster, metav1.Condition{ Type: vmwarev1.VSphereClusterLoadBalancerReadyV1Beta2Condition, Status: metav1.ConditionFalse, Reason: vmwarev1.VSphereClusterLoadBalancerWaitingForIPV1Beta2Reason, @@ -145,8 +145,8 @@ func (s *CPService) ReconcileControlPlaneEndpointService(ctx context.Context, cl return nil, err } - conditions.MarkTrue(clusterCtx.VSphereCluster, vmwarev1.LoadBalancerReadyCondition) - v1beta2conditions.Set(clusterCtx.VSphereCluster, metav1.Condition{ + deprecatedconditions.MarkTrue(clusterCtx.VSphereCluster, vmwarev1.LoadBalancerReadyCondition) + deprecatedv1beta2conditions.Set(clusterCtx.VSphereCluster, metav1.Condition{ Type: vmwarev1.VSphereClusterLoadBalancerReadyV1Beta2Condition, Status: metav1.ConditionTrue, Reason: vmwarev1.VSphereClusterLoadBalancerReadyV1Beta2Reason, @@ -296,7 +296,7 @@ func getVMServiceVIP(vmService *vmoprv1.VirtualMachineService) (string, error) { return "", fmt.Errorf("VirtualMachineService LoadBalancer does not have any Ingresses") } -func getAPIEndpointFromVIP(vmService *vmoprv1.VirtualMachineService, vip string) (*clusterv1.APIEndpoint, error) { +func getAPIEndpointFromVIP(vmService *vmoprv1.VirtualMachineService, vip string) (*clusterv1beta1.APIEndpoint, error) { name := controlPlaneServiceAPIServerPortName servicePort := int32(-1) for _, port := range vmService.Spec.Ports { @@ -310,7 +310,7 @@ func getAPIEndpointFromVIP(vmService *vmoprv1.VirtualMachineService, vip string) return nil, fmt.Errorf("VirtualMachineService does not have port entry for %q", name) } - return &clusterv1.APIEndpoint{ + return &clusterv1beta1.APIEndpoint{ Host: vip, Port: servicePort, }, nil diff --git a/pkg/services/vmoperator/control_plane_endpoint_test.go b/pkg/services/vmoperator/control_plane_endpoint_test.go index cee629fa25..e991b5a959 100644 --- a/pkg/services/vmoperator/control_plane_endpoint_test.go +++ b/pkg/services/vmoperator/control_plane_endpoint_test.go @@ -27,8 +27,9 @@ import ( corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - 
"sigs.k8s.io/cluster-api/util/conditions" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" + deprecatedconditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" ctrlclient "sigs.k8s.io/controller-runtime/pkg/client" vmwarev1 "sigs.k8s.io/cluster-api-provider-vsphere/apis/vmware/v1beta1" @@ -95,7 +96,7 @@ var _ = Describe("ControlPlaneEndpoint Tests", func() { expectedPort int expectedAnnotations map[string]string expectedClusterRoleVMLabels map[string]string - expectedConditions clusterv1.Conditions + expectedConditions clusterv1beta1.Conditions cluster *clusterv1.Cluster vsphereCluster *vmwarev1.VSphereCluster @@ -104,7 +105,7 @@ var _ = Describe("ControlPlaneEndpoint Tests", func() { controllerManagerContext *capvcontext.ControllerManagerContext c ctrlclient.Client - apiEndpoint *clusterv1.APIEndpoint + apiEndpoint *clusterv1beta1.APIEndpoint vms *vmoprv1.VirtualMachineService cpService CPService @@ -154,7 +155,7 @@ var _ = Describe("ControlPlaneEndpoint Tests", func() { } for _, expectedCondition := range expectedConditions { - c := conditions.Get(clusterCtx.VSphereCluster, expectedCondition.Type) + c := deprecatedconditions.Get(clusterCtx.VSphereCluster, expectedCondition.Type) Expect(c).NotTo(BeNil()) Expect(c.Status).To(Equal(expectedCondition.Status)) Expect(c.Reason).To(Equal(expectedCondition.Reason)) @@ -172,7 +173,7 @@ var _ = Describe("ControlPlaneEndpoint Tests", func() { expectAPIEndpoint = false expectVMS = false apiEndpoint, err = cpService.ReconcileControlPlaneEndpointService(ctx, clusterCtx, network.DummyNetworkProvider()) - Expect(conditions.Get(clusterCtx.VSphereCluster, vmwarev1.LoadBalancerReadyCondition)).To(BeNil()) + Expect(deprecatedconditions.Get(clusterCtx.VSphereCluster, vmwarev1.LoadBalancerReadyCondition)).To(BeNil()) verifyOutput() }) @@ -220,7 +221,7 @@ var _ = Describe("ControlPlaneEndpoint Tests", func() { By("NetOp NetworkProvider has no Network") netOpProvider := network.NetOpNetworkProvider(c) // we expect the reconciliation fail because lack of bootstrap data - expectedConditions = append(expectedConditions, clusterv1.Condition{ + expectedConditions = append(expectedConditions, clusterv1beta1.Condition{ Type: vmwarev1.LoadBalancerReadyCondition, Status: corev1.ConditionFalse, Reason: vmwarev1.LoadBalancerCreationFailedReason, @@ -262,7 +263,7 @@ var _ = Describe("ControlPlaneEndpoint Tests", func() { // A VirtualMachineService is only created once all prerequisites have been met expectVMS = false expectedType = vmoprv1.VirtualMachineServiceTypeLoadBalancer - expectedConditions = append(expectedConditions, clusterv1.Condition{ + expectedConditions = append(expectedConditions, clusterv1beta1.Condition{ Type: vmwarev1.LoadBalancerReadyCondition, Status: corev1.ConditionFalse, Reason: vmwarev1.LoadBalancerCreationFailedReason, diff --git a/pkg/services/vmoperator/vmopmachine.go b/pkg/services/vmoperator/vmopmachine.go index 4f04868d49..4cdadcc6bc 100644 --- a/pkg/services/vmoperator/vmopmachine.go +++ b/pkg/services/vmoperator/vmopmachine.go @@ -30,9 +30,10 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" apitypes "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/intstr" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - "sigs.k8s.io/cluster-api/util/conditions" - v1beta2conditions "sigs.k8s.io/cluster-api/util/conditions/v1beta2" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" + 
deprecatedconditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" + deprecatedv1beta2conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions/v1beta2" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" ctrlutil "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" @@ -205,9 +206,9 @@ func (v *VmopMachineService) ReconcileNormal(ctx context.Context, machineCtx cap // Reconcile the VM Operator VirtualMachine. if err := v.reconcileVMOperatorVM(ctx, supervisorMachineCtx, vmOperatorVM); err != nil { - conditions.MarkFalse(supervisorMachineCtx.VSphereMachine, infrav1.VMProvisionedCondition, vmwarev1.VMCreationFailedReason, clusterv1.ConditionSeverityWarning, - fmt.Sprintf("failed to create or update VirtualMachine: %v", err)) - v1beta2conditions.Set(supervisorMachineCtx.VSphereMachine, metav1.Condition{ + deprecatedconditions.MarkFalse(supervisorMachineCtx.VSphereMachine, infrav1.VMProvisionedCondition, vmwarev1.VMCreationFailedReason, clusterv1beta1.ConditionSeverityWarning, + "failed to create or update VirtualMachine: %v", err) + deprecatedv1beta2conditions.Set(supervisorMachineCtx.VSphereMachine, metav1.Condition{ Type: infrav1.VSphereMachineVirtualMachineProvisionedV1Beta2Condition, Status: metav1.ConditionFalse, Reason: infrav1.VSphereMachineVirtualMachineNotProvisionedV1Beta2Reason, @@ -246,8 +247,8 @@ func (v *VmopMachineService) ReconcileNormal(ctx context.Context, machineCtx cap if c == nil || c.Status != metav1.ConditionFalse { continue } - conditions.MarkFalse(supervisorMachineCtx.VSphereMachine, infrav1.VMProvisionedCondition, c.Reason, clusterv1.ConditionSeverityError, c.Message) - v1beta2conditions.Set(supervisorMachineCtx.VSphereMachine, metav1.Condition{ + deprecatedconditions.MarkFalse(supervisorMachineCtx.VSphereMachine, infrav1.VMProvisionedCondition, c.Reason, clusterv1beta1.ConditionSeverityError, "%s", c.Message) + deprecatedv1beta2conditions.Set(supervisorMachineCtx.VSphereMachine, metav1.Condition{ Type: infrav1.VSphereMachineVirtualMachineProvisionedV1Beta2Condition, Status: metav1.ConditionFalse, Reason: c.Reason, @@ -257,8 +258,8 @@ func (v *VmopMachineService) ReconcileNormal(ctx context.Context, machineCtx cap } // All the pre-requisites are in place but the machines is not yet created, report it. 
- conditions.MarkFalse(supervisorMachineCtx.VSphereMachine, infrav1.VMProvisionedCondition, vmwarev1.VMProvisionStartedReason, clusterv1.ConditionSeverityInfo, "") - v1beta2conditions.Set(supervisorMachineCtx.VSphereMachine, metav1.Condition{ + deprecatedconditions.MarkFalse(supervisorMachineCtx.VSphereMachine, infrav1.VMProvisionedCondition, vmwarev1.VMProvisionStartedReason, clusterv1beta1.ConditionSeverityInfo, "") + deprecatedv1beta2conditions.Set(supervisorMachineCtx.VSphereMachine, metav1.Condition{ Type: infrav1.VSphereMachineVirtualMachineProvisionedV1Beta2Condition, Status: metav1.ConditionFalse, Reason: infrav1.VSphereMachineVirtualMachineProvisioningV1Beta2Reason, @@ -270,8 +271,8 @@ func (v *VmopMachineService) ReconcileNormal(ctx context.Context, machineCtx cap supervisorMachineCtx.VSphereMachine.Status.VMStatus = vmwarev1.VirtualMachineStateCreated if vmOperatorVM.Status.PowerState != vmoprv1.VirtualMachinePowerStateOn { - conditions.MarkFalse(supervisorMachineCtx.VSphereMachine, infrav1.VMProvisionedCondition, vmwarev1.PoweringOnReason, clusterv1.ConditionSeverityInfo, "") - v1beta2conditions.Set(supervisorMachineCtx.VSphereMachine, metav1.Condition{ + deprecatedconditions.MarkFalse(supervisorMachineCtx.VSphereMachine, infrav1.VMProvisionedCondition, vmwarev1.PoweringOnReason, clusterv1beta1.ConditionSeverityInfo, "") + deprecatedv1beta2conditions.Set(supervisorMachineCtx.VSphereMachine, metav1.Condition{ Type: infrav1.VSphereMachineVirtualMachineProvisionedV1Beta2Condition, Status: metav1.ConditionFalse, Reason: infrav1.VSphereMachineVirtualMachinePoweringOnV1Beta2Reason, @@ -283,8 +284,8 @@ func (v *VmopMachineService) ReconcileNormal(ctx context.Context, machineCtx cap supervisorMachineCtx.VSphereMachine.Status.VMStatus = vmwarev1.VirtualMachineStatePoweredOn if vmOperatorVM.Status.Network == nil || (vmOperatorVM.Status.Network.PrimaryIP4 == "" && vmOperatorVM.Status.Network.PrimaryIP6 == "") { - conditions.MarkFalse(supervisorMachineCtx.VSphereMachine, infrav1.VMProvisionedCondition, vmwarev1.WaitingForNetworkAddressReason, clusterv1.ConditionSeverityInfo, "") - v1beta2conditions.Set(supervisorMachineCtx.VSphereMachine, metav1.Condition{ + deprecatedconditions.MarkFalse(supervisorMachineCtx.VSphereMachine, infrav1.VMProvisionedCondition, vmwarev1.WaitingForNetworkAddressReason, clusterv1beta1.ConditionSeverityInfo, "") + deprecatedv1beta2conditions.Set(supervisorMachineCtx.VSphereMachine, metav1.Condition{ Type: infrav1.VSphereMachineVirtualMachineProvisionedV1Beta2Condition, Status: metav1.ConditionFalse, Reason: infrav1.VSphereMachineVirtualMachineWaitingForNetworkAddressV1Beta2Reason, @@ -294,8 +295,8 @@ func (v *VmopMachineService) ReconcileNormal(ctx context.Context, machineCtx cap } if vmOperatorVM.Status.BiosUUID == "" { - conditions.MarkFalse(supervisorMachineCtx.VSphereMachine, infrav1.VMProvisionedCondition, vmwarev1.WaitingForBIOSUUIDReason, clusterv1.ConditionSeverityInfo, "") - v1beta2conditions.Set(supervisorMachineCtx.VSphereMachine, metav1.Condition{ + deprecatedconditions.MarkFalse(supervisorMachineCtx.VSphereMachine, infrav1.VMProvisionedCondition, vmwarev1.WaitingForBIOSUUIDReason, clusterv1beta1.ConditionSeverityInfo, "") + deprecatedv1beta2conditions.Set(supervisorMachineCtx.VSphereMachine, metav1.Condition{ Type: infrav1.VSphereMachineVirtualMachineProvisionedV1Beta2Condition, Status: metav1.ConditionFalse, Reason: infrav1.VSphereMachineVirtualMachineWaitingForBIOSUUIDV1Beta2Reason, @@ -316,8 +317,8 @@ func (v *VmopMachineService) 
ReconcileNormal(ctx context.Context, machineCtx cap // Mark the VSphereMachine as Ready supervisorMachineCtx.VSphereMachine.Status.Ready = true - conditions.MarkTrue(supervisorMachineCtx.VSphereMachine, infrav1.VMProvisionedCondition) - v1beta2conditions.Set(supervisorMachineCtx.VSphereMachine, metav1.Condition{ + deprecatedconditions.MarkTrue(supervisorMachineCtx.VSphereMachine, infrav1.VMProvisionedCondition) + deprecatedv1beta2conditions.Set(supervisorMachineCtx.VSphereMachine, metav1.Condition{ Type: infrav1.VSphereMachineVirtualMachineProvisionedV1Beta2Condition, Status: metav1.ConditionTrue, Reason: infrav1.VSphereMachineVirtualMachineProvisionedV1Beta2Reason, @@ -459,7 +460,7 @@ func (v *VmopMachineService) reconcileVMOperatorVM(ctx context.Context, supervis // Not all network providers (for example, NSX-VPC) provide support for VM // readiness probes. The flag PerformsVMReadinessProbe is used to determine // whether a VM readiness probe should be conducted. - if v.ConfigureControlPlaneVMReadinessProbe && infrautilv1.IsControlPlaneMachine(supervisorMachineCtx.Machine) && supervisorMachineCtx.Cluster.Status.ControlPlaneReady { + if v.ConfigureControlPlaneVMReadinessProbe && infrautilv1.IsControlPlaneMachine(supervisorMachineCtx.Machine) && supervisorMachineCtx.Cluster.Status.Initialization != nil && supervisorMachineCtx.Cluster.Status.Initialization.ControlPlaneInitialized { vmOperatorVM.Spec.ReadinessProbe = &vmoprv1.VirtualMachineReadinessProbeSpec{ TCPSocket: &vmoprv1.TCPSocketAction{ Port: intstr.FromInt(defaultAPIBindPort), diff --git a/pkg/services/vmoperator/vmopmachine_test.go b/pkg/services/vmoperator/vmopmachine_test.go index 69a3d9076c..8b0138f218 100644 --- a/pkg/services/vmoperator/vmopmachine_test.go +++ b/pkg/services/vmoperator/vmopmachine_test.go @@ -32,8 +32,9 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" "k8s.io/utils/ptr" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - "sigs.k8s.io/cluster-api/util/conditions" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" + deprecatedconditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" "sigs.k8s.io/controller-runtime/pkg/client" infrav1 "sigs.k8s.io/cluster-api-provider-vsphere/apis/v1beta1" @@ -92,7 +93,7 @@ var _ = Describe("VirtualMachine tests", func() { expectReconcileError bool expectVMOpVM bool expectedState vmwarev1.VirtualMachineState - expectedConditions clusterv1.Conditions + expectedConditions clusterv1beta1.Conditions expectedRequeue bool cluster *clusterv1.Cluster @@ -166,7 +167,7 @@ var _ = Describe("VirtualMachine tests", func() { } for _, expectedCondition := range expectedConditions { - c := conditions.Get(machineContext.VSphereMachine, expectedCondition.Type) + c := deprecatedconditions.Get(machineContext.VSphereMachine, expectedCondition.Type) Expect(c).NotTo(BeNil()) Expect(c.Status).To(Equal(expectedCondition.Status)) Expect(c.Reason).To(Equal(expectedCondition.Reason)) @@ -193,7 +194,7 @@ var _ = Describe("VirtualMachine tests", func() { // bootstrap data resource, but VM Operator is not // running in this test domain, and so the condition // will not be set on the VM Operator VM. 
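Note on the readiness-probe gate changed above: the v1beta1 cluster.Status.ControlPlaneReady check is replaced by the nil-guarded Initialization.ControlPlaneInitialized check on the v1beta2 Cluster, since Initialization is a pointer and may be unset. A hypothetical helper (not part of this change) capturing that check:

package vmoperator

import clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2"

// controlPlaneInitialized wraps the nil-safe check used by the readiness-probe gate above.
func controlPlaneInitialized(cluster *clusterv1.Cluster) bool {
	return cluster.Status.Initialization != nil && cluster.Status.Initialization.ControlPlaneInitialized
}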
- expectedConditions = append(expectedConditions, clusterv1.Condition{ + expectedConditions = append(expectedConditions, clusterv1beta1.Condition{ Type: infrav1.VMProvisionedCondition, Status: corev1.ConditionFalse, Reason: vmwarev1.VMProvisionStartedReason, @@ -340,7 +341,7 @@ var _ = Describe("VirtualMachine tests", func() { Expect(vmService.Client.Create(ctx, secret)).To(Succeed()) machine.Spec.Bootstrap.DataSecretName = &secretName - expectedConditions = append(expectedConditions, clusterv1.Condition{ + expectedConditions = append(expectedConditions, clusterv1beta1.Condition{ Type: infrav1.VMProvisionedCondition, Status: corev1.ConditionFalse, Reason: vmwarev1.VMProvisionStartedReason, @@ -405,7 +406,10 @@ var _ = Describe("VirtualMachine tests", func() { By("Setting cluster.Status.ControlPlaneReady to true") // Set the control plane to be ready so that the new VM will have a probe - cluster.Status.ControlPlaneReady = true + if cluster.Status.Initialization == nil { + cluster.Status.Initialization = &clusterv1.ClusterInitializationStatus{} + } + cluster.Status.Initialization.ControlPlaneInitialized = true vmopVM = getReconciledVM(ctx, vmService, supervisorMachineContext) if vmopVM.Status.Network == nil { @@ -426,7 +430,7 @@ var _ = Describe("VirtualMachine tests", func() { By("Machine doens't have a K8S version") machine.Spec.Version = nil - expectedConditions = append(expectedConditions, clusterv1.Condition{ + expectedConditions = append(expectedConditions, clusterv1beta1.Condition{ Type: infrav1.VMProvisionedCondition, Status: corev1.ConditionFalse, Reason: vmwarev1.VMCreationFailedReason, @@ -467,10 +471,10 @@ var _ = Describe("VirtualMachine tests", func() { expectedImageName = imageName expectReconcileError = true expectVMOpVM = true - expectedConditions = append(expectedConditions, clusterv1.Condition{ + expectedConditions = append(expectedConditions, clusterv1beta1.Condition{ Type: infrav1.VMProvisionedCondition, Status: corev1.ConditionFalse, - Severity: clusterv1.ConditionSeverityError, + Severity: clusterv1beta1.ConditionSeverityError, Reason: "NotFound", Message: errMessage, }) diff --git a/pkg/util/cluster.go b/pkg/util/cluster.go index e0fcce7efd..9cb2247348 100644 --- a/pkg/util/cluster.go +++ b/pkg/util/cluster.go @@ -21,7 +21,7 @@ import ( "github.com/pkg/errors" apitypes "k8s.io/apimachinery/pkg/types" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/controller-runtime/pkg/client" infrav1 "sigs.k8s.io/cluster-api-provider-vsphere/apis/v1beta1" diff --git a/pkg/util/fetch_object.go b/pkg/util/fetch_object.go index d540b9515c..c44ff4e54f 100644 --- a/pkg/util/fetch_object.go +++ b/pkg/util/fetch_object.go @@ -22,8 +22,8 @@ import ( "github.com/pkg/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1" + controlplanev1 "sigs.k8s.io/cluster-api/api/controlplane/kubeadm/v1beta2" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" ctrlclient "sigs.k8s.io/controller-runtime/pkg/client" ) diff --git a/pkg/util/fetch_object_test.go b/pkg/util/fetch_object_test.go index 002435927e..7d0a79bae9 100644 --- a/pkg/util/fetch_object_test.go +++ b/pkg/util/fetch_object_test.go @@ -22,8 +22,8 @@ import ( "github.com/onsi/gomega" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - controlplanev1 
"sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1" + controlplanev1 "sigs.k8s.io/cluster-api/api/controlplane/kubeadm/v1beta2" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" ctrlclient "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/cluster-api-provider-vsphere/pkg/context/fake" diff --git a/pkg/util/kubeclient.go b/pkg/util/kubeclient.go index b7e504234d..70bd4586a5 100644 --- a/pkg/util/kubeclient.go +++ b/pkg/util/kubeclient.go @@ -23,7 +23,7 @@ import ( "github.com/pkg/errors" "k8s.io/client-go/kubernetes" "k8s.io/client-go/tools/clientcmd" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" kcfg "sigs.k8s.io/cluster-api/util/kubeconfig" "sigs.k8s.io/controller-runtime/pkg/client" ) diff --git a/pkg/util/machines.go b/pkg/util/machines.go index 3c0545575e..ead106a963 100644 --- a/pkg/util/machines.go +++ b/pkg/util/machines.go @@ -28,7 +28,8 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" apitypes "k8s.io/apimachinery/pkg/types" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/controller-runtime/pkg/client" infrav1 "sigs.k8s.io/cluster-api-provider-vsphere/apis/v1beta1" @@ -66,7 +67,7 @@ func GetMachinePreferredIPAddress(machine *infrav1.VSphereMachine) (string, erro } for _, machineAddr := range machine.Status.Addresses { - if machineAddr.Type != clusterv1.MachineExternalIP { + if machineAddr.Type != clusterv1beta1.MachineExternalIP { continue } if cidr == nil { diff --git a/pkg/util/machines_test.go b/pkg/util/machines_test.go index 9cfebb232c..a6a5089ec3 100644 --- a/pkg/util/machines_test.go +++ b/pkg/util/machines_test.go @@ -25,7 +25,8 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" clientgoscheme "k8s.io/client-go/kubernetes/scheme" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" @@ -45,9 +46,9 @@ func Test_GetMachinePreferredIPAddress(t *testing.T) { name: "single IPv4 address, no preferred CIDR", machine: &infrav1.VSphereMachine{ Status: infrav1.VSphereMachineStatus{ - Addresses: []clusterv1.MachineAddress{ + Addresses: []clusterv1beta1.MachineAddress{ { - Type: clusterv1.MachineExternalIP, + Type: clusterv1beta1.MachineExternalIP, Address: "192.168.0.1", }, }, @@ -60,9 +61,9 @@ func Test_GetMachinePreferredIPAddress(t *testing.T) { name: "single IPv6 address, no preferred CIDR", machine: &infrav1.VSphereMachine{ Status: infrav1.VSphereMachineStatus{ - Addresses: []clusterv1.MachineAddress{ + Addresses: []clusterv1beta1.MachineAddress{ { - Type: clusterv1.MachineExternalIP, + Type: clusterv1beta1.MachineExternalIP, Address: "fdf3:35b5:9dad:6e09::0001", }, }, @@ -75,17 +76,17 @@ func Test_GetMachinePreferredIPAddress(t *testing.T) { name: "multiple IPv4 addresses, only 1 internal, no preferred CIDR", machine: &infrav1.VSphereMachine{ Status: infrav1.VSphereMachineStatus{ - Addresses: []clusterv1.MachineAddress{ + Addresses: []clusterv1beta1.MachineAddress{ { - Type: clusterv1.MachineExternalIP, + Type: clusterv1beta1.MachineExternalIP, Address: "192.168.0.1", }, { - Type: clusterv1.MachineExternalIP, + Type: clusterv1beta1.MachineExternalIP, Address: "1.1.1.1", }, { - Type: 
clusterv1.MachineExternalIP, + Type: clusterv1beta1.MachineExternalIP, Address: "2.2.2.2", }, }, @@ -105,13 +106,13 @@ func Test_GetMachinePreferredIPAddress(t *testing.T) { }, }, Status: infrav1.VSphereMachineStatus{ - Addresses: []clusterv1.MachineAddress{ + Addresses: []clusterv1beta1.MachineAddress{ { - Type: clusterv1.MachineExternalIP, + Type: clusterv1beta1.MachineExternalIP, Address: "192.168.0.1", }, { - Type: clusterv1.MachineExternalIP, + Type: clusterv1beta1.MachineExternalIP, Address: "172.17.0.1", }, }, @@ -131,13 +132,13 @@ func Test_GetMachinePreferredIPAddress(t *testing.T) { }, }, Status: infrav1.VSphereMachineStatus{ - Addresses: []clusterv1.MachineAddress{ + Addresses: []clusterv1beta1.MachineAddress{ { - Type: clusterv1.MachineExternalIP, + Type: clusterv1beta1.MachineExternalIP, Address: "192.168.0.1", }, { - Type: clusterv1.MachineExternalIP, + Type: clusterv1beta1.MachineExternalIP, Address: "fdf3:35b5:9dad:6e09::0001", }, }, @@ -158,13 +159,13 @@ func Test_GetMachinePreferredIPAddress(t *testing.T) { }, Status: infrav1.VSphereMachineStatus{ - Addresses: []clusterv1.MachineAddress{ + Addresses: []clusterv1beta1.MachineAddress{ { - Type: clusterv1.MachineExternalIP, + Type: clusterv1beta1.MachineExternalIP, Address: "192.168.0.1", }, { - Type: clusterv1.MachineExternalIP, + Type: clusterv1beta1.MachineExternalIP, Address: "fdf3:35b5:9dad:6e09::0001", }, }, @@ -184,7 +185,7 @@ func Test_GetMachinePreferredIPAddress(t *testing.T) { }, }, Status: infrav1.VSphereMachineStatus{ - Addresses: []clusterv1.MachineAddress{}, + Addresses: []clusterv1beta1.MachineAddress{}, }, }, ipAddr: "", @@ -202,9 +203,9 @@ func Test_GetMachinePreferredIPAddress(t *testing.T) { }, Status: infrav1.VSphereMachineStatus{ - Addresses: []clusterv1.MachineAddress{ + Addresses: []clusterv1beta1.MachineAddress{ { - Type: clusterv1.MachineExternalIP, + Type: clusterv1beta1.MachineExternalIP, Address: "10.0.0.1", }, }, diff --git a/pkg/util/testutil.go b/pkg/util/testutil.go index c7f32add53..4556a904de 100644 --- a/pkg/util/testutil.go +++ b/pkg/util/testutil.go @@ -25,8 +25,8 @@ import ( "k8s.io/apimachinery/pkg/runtime" clientgoscheme "k8s.io/client-go/kubernetes/scheme" "k8s.io/klog/v2" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1" + bootstrapv1 "sigs.k8s.io/cluster-api/api/bootstrap/kubeadm/v1beta2" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/controller-runtime/pkg/client/fake" vmwarev1 "sigs.k8s.io/cluster-api-provider-vsphere/apis/vmware/v1beta1" diff --git a/templates/cluster-template-external-loadbalancer.yaml b/templates/cluster-template-external-loadbalancer.yaml index 0da7583926..1413450182 100644 --- a/templates/cluster-template-external-loadbalancer.yaml +++ b/templates/cluster-template-external-loadbalancer.yaml @@ -1,4 +1,4 @@ -apiVersion: cluster.x-k8s.io/v1beta1 +apiVersion: cluster.x-k8s.io/v1beta2 kind: Cluster metadata: labels: @@ -11,7 +11,7 @@ spec: cidrBlocks: - 192.168.0.0/16 controlPlaneRef: - apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + apiVersion: controlplane.cluster.x-k8s.io/v1beta2 kind: KubeadmControlPlane name: '${CLUSTER_NAME}' infrastructureRef: @@ -86,7 +86,7 @@ spec: storagePolicyName: '${VSPHERE_STORAGE_POLICY}' template: '${VSPHERE_TEMPLATE}' --- -apiVersion: controlplane.cluster.x-k8s.io/v1beta1 +apiVersion: controlplane.cluster.x-k8s.io/v1beta2 kind: KubeadmControlPlane metadata: name: '${CLUSTER_NAME}' @@ -131,7 +131,7 @@ spec: replicas: 
${CONTROL_PLANE_MACHINE_COUNT} version: '${KUBERNETES_VERSION}' --- -apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 +apiVersion: bootstrap.cluster.x-k8s.io/v1beta2 kind: KubeadmConfigTemplate metadata: name: '${CLUSTER_NAME}-md-0' @@ -160,7 +160,7 @@ spec: - '${VSPHERE_SSH_AUTHORIZED_KEY}' sudo: ALL=(ALL) NOPASSWD:ALL --- -apiVersion: cluster.x-k8s.io/v1beta1 +apiVersion: cluster.x-k8s.io/v1beta2 kind: MachineDeployment metadata: labels: @@ -179,7 +179,7 @@ spec: spec: bootstrap: configRef: - apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 + apiVersion: bootstrap.cluster.x-k8s.io/v1beta2 kind: KubeadmConfigTemplate name: '${CLUSTER_NAME}-md-0' clusterName: '${CLUSTER_NAME}' @@ -189,7 +189,7 @@ spec: name: ${CLUSTER_NAME}-worker version: '${KUBERNETES_VERSION}' --- -apiVersion: addons.cluster.x-k8s.io/v1beta1 +apiVersion: addons.cluster.x-k8s.io/v1beta2 kind: ClusterResourceSet metadata: labels: diff --git a/templates/cluster-template-ignition.yaml b/templates/cluster-template-ignition.yaml index 6b09213f15..b6c9e78df3 100644 --- a/templates/cluster-template-ignition.yaml +++ b/templates/cluster-template-ignition.yaml @@ -1,4 +1,4 @@ -apiVersion: cluster.x-k8s.io/v1beta1 +apiVersion: cluster.x-k8s.io/v1beta2 kind: Cluster metadata: labels: @@ -11,7 +11,7 @@ spec: cidrBlocks: - 192.168.0.0/16 controlPlaneRef: - apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + apiVersion: controlplane.cluster.x-k8s.io/v1beta2 kind: KubeadmControlPlane name: '${CLUSTER_NAME}' infrastructureRef: @@ -60,7 +60,7 @@ spec: storagePolicyName: '${VSPHERE_STORAGE_POLICY}' template: '${VSPHERE_TEMPLATE}' --- -apiVersion: controlplane.cluster.x-k8s.io/v1beta1 +apiVersion: controlplane.cluster.x-k8s.io/v1beta2 kind: KubeadmControlPlane metadata: name: '${CLUSTER_NAME}' @@ -291,7 +291,7 @@ spec: replicas: ${CONTROL_PLANE_MACHINE_COUNT} version: '${KUBERNETES_VERSION}' --- -apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 +apiVersion: bootstrap.cluster.x-k8s.io/v1beta2 kind: KubeadmConfigTemplate metadata: name: '${CLUSTER_NAME}-md-0' @@ -381,7 +381,7 @@ spec: - '${VSPHERE_SSH_AUTHORIZED_KEY}' sudo: ALL=(ALL) NOPASSWD:ALL --- -apiVersion: cluster.x-k8s.io/v1beta1 +apiVersion: cluster.x-k8s.io/v1beta2 kind: MachineDeployment metadata: labels: @@ -400,7 +400,7 @@ spec: spec: bootstrap: configRef: - apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 + apiVersion: bootstrap.cluster.x-k8s.io/v1beta2 kind: KubeadmConfigTemplate name: '${CLUSTER_NAME}-md-0' clusterName: '${CLUSTER_NAME}' @@ -410,7 +410,7 @@ spec: name: '${CLUSTER_NAME}' version: '${KUBERNETES_VERSION}' --- -apiVersion: addons.cluster.x-k8s.io/v1beta1 +apiVersion: addons.cluster.x-k8s.io/v1beta2 kind: ClusterResourceSet metadata: labels: diff --git a/templates/cluster-template-node-ipam.yaml b/templates/cluster-template-node-ipam.yaml index e562935811..2b55abcbd5 100644 --- a/templates/cluster-template-node-ipam.yaml +++ b/templates/cluster-template-node-ipam.yaml @@ -1,4 +1,4 @@ -apiVersion: cluster.x-k8s.io/v1beta1 +apiVersion: cluster.x-k8s.io/v1beta2 kind: Cluster metadata: labels: @@ -11,7 +11,7 @@ spec: cidrBlocks: - 192.168.0.0/16 controlPlaneRef: - apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + apiVersion: controlplane.cluster.x-k8s.io/v1beta2 kind: KubeadmControlPlane name: '${CLUSTER_NAME}' infrastructureRef: @@ -96,7 +96,7 @@ spec: storagePolicyName: '${VSPHERE_STORAGE_POLICY}' template: '${VSPHERE_TEMPLATE}' --- -apiVersion: controlplane.cluster.x-k8s.io/v1beta1 +apiVersion: controlplane.cluster.x-k8s.io/v1beta2 kind: KubeadmControlPlane metadata: 
name: '${CLUSTER_NAME}' @@ -266,7 +266,7 @@ spec: replicas: ${CONTROL_PLANE_MACHINE_COUNT} version: '${KUBERNETES_VERSION}' --- -apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 +apiVersion: bootstrap.cluster.x-k8s.io/v1beta2 kind: KubeadmConfigTemplate metadata: name: '${CLUSTER_NAME}-md-0' @@ -295,7 +295,7 @@ spec: - '${VSPHERE_SSH_AUTHORIZED_KEY}' sudo: ALL=(ALL) NOPASSWD:ALL --- -apiVersion: cluster.x-k8s.io/v1beta1 +apiVersion: cluster.x-k8s.io/v1beta2 kind: MachineDeployment metadata: labels: @@ -314,7 +314,7 @@ spec: spec: bootstrap: configRef: - apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 + apiVersion: bootstrap.cluster.x-k8s.io/v1beta2 kind: KubeadmConfigTemplate name: '${CLUSTER_NAME}-md-0' clusterName: '${CLUSTER_NAME}' @@ -324,7 +324,7 @@ spec: name: ${CLUSTER_NAME}-worker version: '${KUBERNETES_VERSION}' --- -apiVersion: addons.cluster.x-k8s.io/v1beta1 +apiVersion: addons.cluster.x-k8s.io/v1beta2 kind: ClusterResourceSet metadata: labels: diff --git a/templates/cluster-template-supervisor.yaml b/templates/cluster-template-supervisor.yaml index 835f48b1c5..c29b0802c3 100644 --- a/templates/cluster-template-supervisor.yaml +++ b/templates/cluster-template-supervisor.yaml @@ -1,4 +1,4 @@ -apiVersion: cluster.x-k8s.io/v1beta1 +apiVersion: cluster.x-k8s.io/v1beta2 kind: Cluster metadata: labels: @@ -11,7 +11,7 @@ spec: cidrBlocks: - 192.168.0.0/16 controlPlaneRef: - apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + apiVersion: controlplane.cluster.x-k8s.io/v1beta2 kind: KubeadmControlPlane name: '${CLUSTER_NAME}' infrastructureRef: @@ -55,7 +55,7 @@ spec: powerOffMode: ${VSPHERE_POWER_OFF_MODE:=trySoft} storageClass: ${VSPHERE_STORAGE_CLASS} --- -apiVersion: controlplane.cluster.x-k8s.io/v1beta1 +apiVersion: controlplane.cluster.x-k8s.io/v1beta2 kind: KubeadmControlPlane metadata: name: '${CLUSTER_NAME}' @@ -226,7 +226,7 @@ spec: replicas: ${CONTROL_PLANE_MACHINE_COUNT} version: '${KUBERNETES_VERSION}' --- -apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 +apiVersion: bootstrap.cluster.x-k8s.io/v1beta2 kind: KubeadmConfigTemplate metadata: name: '${CLUSTER_NAME}-md-0' @@ -256,7 +256,7 @@ spec: - '${VSPHERE_SSH_AUTHORIZED_KEY}' sudo: ALL=(ALL) NOPASSWD:ALL --- -apiVersion: cluster.x-k8s.io/v1beta1 +apiVersion: cluster.x-k8s.io/v1beta2 kind: MachineDeployment metadata: labels: @@ -275,7 +275,7 @@ spec: spec: bootstrap: configRef: - apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 + apiVersion: bootstrap.cluster.x-k8s.io/v1beta2 kind: KubeadmConfigTemplate name: '${CLUSTER_NAME}-md-0' clusterName: '${CLUSTER_NAME}' @@ -285,7 +285,7 @@ spec: name: ${CLUSTER_NAME}-worker version: '${KUBERNETES_VERSION}' --- -apiVersion: addons.cluster.x-k8s.io/v1beta1 +apiVersion: addons.cluster.x-k8s.io/v1beta2 kind: ClusterResourceSet metadata: labels: diff --git a/templates/cluster-template-topology-supervisor.yaml b/templates/cluster-template-topology-supervisor.yaml index 0531a3095a..e85c651b52 100644 --- a/templates/cluster-template-topology-supervisor.yaml +++ b/templates/cluster-template-topology-supervisor.yaml @@ -1,4 +1,4 @@ -apiVersion: cluster.x-k8s.io/v1beta1 +apiVersion: cluster.x-k8s.io/v1beta2 kind: Cluster metadata: labels: @@ -7,7 +7,8 @@ metadata: namespace: '${NAMESPACE}' spec: topology: - class: '${CLUSTER_CLASS_NAME}' + classRef: + name: '${CLUSTER_CLASS_NAME}' controlPlane: replicas: ${CONTROL_PLANE_MACHINE_COUNT} variables: @@ -96,7 +97,7 @@ spec: name: md-0 replicas: ${WORKER_MACHINE_COUNT} --- -apiVersion: addons.cluster.x-k8s.io/v1beta1 +apiVersion: 
addons.cluster.x-k8s.io/v1beta2 kind: ClusterResourceSet metadata: labels: diff --git a/templates/cluster-template-topology.yaml b/templates/cluster-template-topology.yaml index 6b6669471e..4ce3261b28 100644 --- a/templates/cluster-template-topology.yaml +++ b/templates/cluster-template-topology.yaml @@ -1,4 +1,4 @@ -apiVersion: cluster.x-k8s.io/v1beta1 +apiVersion: cluster.x-k8s.io/v1beta2 kind: Cluster metadata: labels: @@ -7,7 +7,8 @@ metadata: namespace: '${NAMESPACE}' spec: topology: - class: '${CLUSTER_CLASS_NAME}' + classRef: + name: '${CLUSTER_CLASS_NAME}' controlPlane: replicas: ${CONTROL_PLANE_MACHINE_COUNT} variables: @@ -111,7 +112,7 @@ stringData: password: "${VSPHERE_PASSWORD}" username: "${VSPHERE_USERNAME}" --- -apiVersion: addons.cluster.x-k8s.io/v1beta1 +apiVersion: addons.cluster.x-k8s.io/v1beta2 kind: ClusterResourceSet metadata: labels: diff --git a/templates/cluster-template.yaml b/templates/cluster-template.yaml index d0a03258cc..8816b25450 100644 --- a/templates/cluster-template.yaml +++ b/templates/cluster-template.yaml @@ -1,4 +1,4 @@ -apiVersion: cluster.x-k8s.io/v1beta1 +apiVersion: cluster.x-k8s.io/v1beta2 kind: Cluster metadata: labels: @@ -11,7 +11,7 @@ spec: cidrBlocks: - 192.168.0.0/16 controlPlaneRef: - apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + apiVersion: controlplane.cluster.x-k8s.io/v1beta2 kind: KubeadmControlPlane name: '${CLUSTER_NAME}' infrastructureRef: @@ -86,7 +86,7 @@ spec: storagePolicyName: '${VSPHERE_STORAGE_POLICY}' template: '${VSPHERE_TEMPLATE}' --- -apiVersion: controlplane.cluster.x-k8s.io/v1beta1 +apiVersion: controlplane.cluster.x-k8s.io/v1beta2 kind: KubeadmControlPlane metadata: name: '${CLUSTER_NAME}' @@ -256,7 +256,7 @@ spec: replicas: ${CONTROL_PLANE_MACHINE_COUNT} version: '${KUBERNETES_VERSION}' --- -apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 +apiVersion: bootstrap.cluster.x-k8s.io/v1beta2 kind: KubeadmConfigTemplate metadata: name: '${CLUSTER_NAME}-md-0' @@ -285,7 +285,7 @@ spec: - '${VSPHERE_SSH_AUTHORIZED_KEY}' sudo: ALL=(ALL) NOPASSWD:ALL --- -apiVersion: cluster.x-k8s.io/v1beta1 +apiVersion: cluster.x-k8s.io/v1beta2 kind: MachineDeployment metadata: labels: @@ -304,7 +304,7 @@ spec: spec: bootstrap: configRef: - apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 + apiVersion: bootstrap.cluster.x-k8s.io/v1beta2 kind: KubeadmConfigTemplate name: '${CLUSTER_NAME}-md-0' clusterName: '${CLUSTER_NAME}' @@ -314,7 +314,7 @@ spec: name: ${CLUSTER_NAME}-worker version: '${KUBERNETES_VERSION}' --- -apiVersion: addons.cluster.x-k8s.io/v1beta1 +apiVersion: addons.cluster.x-k8s.io/v1beta2 kind: ClusterResourceSet metadata: labels: diff --git a/templates/clusterclass-template-supervisor.yaml b/templates/clusterclass-template-supervisor.yaml index 1fe1d45810..15824a06f0 100644 --- a/templates/clusterclass-template-supervisor.yaml +++ b/templates/clusterclass-template-supervisor.yaml @@ -7,7 +7,7 @@ spec: template: spec: {} --- -apiVersion: cluster.x-k8s.io/v1beta1 +apiVersion: cluster.x-k8s.io/v1beta2 kind: ClusterClass metadata: name: '${CLUSTER_CLASS_NAME}' @@ -20,7 +20,7 @@ spec: name: ${CLUSTER_CLASS_NAME}-template namespace: '${NAMESPACE}' ref: - apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + apiVersion: controlplane.cluster.x-k8s.io/v1beta2 kind: KubeadmControlPlaneTemplate name: ${CLUSTER_CLASS_NAME}-controlplane namespace: '${NAMESPACE}' @@ -40,7 +40,7 @@ spec: path: /spec/template/spec/kubeadmConfigSpec/postKubeadmCommands value: [] selector: - apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + apiVersion: 
controlplane.cluster.x-k8s.io/v1beta2 kind: KubeadmControlPlaneTemplate matchResources: controlPlane: true @@ -52,7 +52,7 @@ spec: path: /spec/template/spec/postKubeadmCommands value: [] selector: - apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 + apiVersion: bootstrap.cluster.x-k8s.io/v1beta2 kind: KubeadmConfigTemplate matchResources: machineDeploymentClass: @@ -70,7 +70,7 @@ spec: - '{{ .sshKey }}' sudo: ALL=(ALL) NOPASSWD:ALL selector: - apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + apiVersion: controlplane.cluster.x-k8s.io/v1beta2 kind: KubeadmControlPlaneTemplate matchResources: controlPlane: true @@ -84,7 +84,7 @@ spec: - '{{ .sshKey }}' sudo: ALL=(ALL) NOPASSWD:ALL selector: - apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 + apiVersion: bootstrap.cluster.x-k8s.io/v1beta2 kind: KubeadmConfigTemplate matchResources: machineDeploymentClass: @@ -176,34 +176,34 @@ spec: path: /etc/pre-kubeadm-commands/50-kube-vip-prepare.sh permissions: "0700" selector: - apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + apiVersion: controlplane.cluster.x-k8s.io/v1beta2 kind: KubeadmControlPlaneTemplate matchResources: controlPlane: true name: kubeVipPodManifest variables: - - metadata: {} + - deprecatedV1Beta1Metadata: {} name: sshKey required: false schema: openAPIV3Schema: description: Public key to SSH onto the cluster nodes. type: string - - metadata: {} + - deprecatedV1Beta1Metadata: {} name: controlPlaneIpAddr required: true schema: openAPIV3Schema: description: Floating VIP for the control plane. type: string - - metadata: {} + - deprecatedV1Beta1Metadata: {} name: controlPlanePort required: true schema: openAPIV3Schema: description: Port for the control plane endpoint. type: integer - - metadata: {} + - deprecatedV1Beta1Metadata: {} name: kubeVipPodManifest required: true schema: @@ -216,7 +216,7 @@ spec: template: bootstrap: ref: - apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 + apiVersion: bootstrap.cluster.x-k8s.io/v1beta2 kind: KubeadmConfigTemplate name: ${CLUSTER_CLASS_NAME}-worker-bootstrap-template namespace: '${NAMESPACE}' @@ -254,7 +254,7 @@ spec: powerOffMode: ${VSPHERE_POWER_OFF_MODE:=trySoft} storageClass: ${VSPHERE_STORAGE_CLASS} --- -apiVersion: controlplane.cluster.x-k8s.io/v1beta1 +apiVersion: controlplane.cluster.x-k8s.io/v1beta2 kind: KubeadmControlPlaneTemplate metadata: name: ${CLUSTER_CLASS_NAME}-controlplane @@ -295,7 +295,7 @@ spec: - '${VSPHERE_SSH_AUTHORIZED_KEY}' sudo: ALL=(ALL) NOPASSWD:ALL --- -apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 +apiVersion: bootstrap.cluster.x-k8s.io/v1beta2 kind: KubeadmConfigTemplate metadata: name: ${CLUSTER_CLASS_NAME}-worker-bootstrap-template diff --git a/templates/clusterclass-template.yaml b/templates/clusterclass-template.yaml index dbacfc0e07..b1dd129562 100644 --- a/templates/clusterclass-template.yaml +++ b/templates/clusterclass-template.yaml @@ -7,7 +7,7 @@ spec: template: spec: {} --- -apiVersion: cluster.x-k8s.io/v1beta1 +apiVersion: cluster.x-k8s.io/v1beta2 kind: ClusterClass metadata: name: '${CLUSTER_CLASS_NAME}' @@ -20,7 +20,7 @@ spec: name: ${CLUSTER_CLASS_NAME}-template namespace: '${NAMESPACE}' ref: - apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + apiVersion: controlplane.cluster.x-k8s.io/v1beta2 kind: KubeadmControlPlaneTemplate name: ${CLUSTER_CLASS_NAME}-controlplane namespace: '${NAMESPACE}' @@ -40,7 +40,7 @@ spec: path: /spec/template/spec/kubeadmConfigSpec/postKubeadmCommands value: [] selector: - apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + apiVersion: 
controlplane.cluster.x-k8s.io/v1beta2 kind: KubeadmControlPlaneTemplate matchResources: controlPlane: true @@ -52,7 +52,7 @@ spec: path: /spec/template/spec/postKubeadmCommands value: [] selector: - apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 + apiVersion: bootstrap.cluster.x-k8s.io/v1beta2 kind: KubeadmConfigTemplate matchResources: machineDeploymentClass: @@ -70,7 +70,7 @@ spec: - '{{ .sshKey }}' sudo: ALL=(ALL) NOPASSWD:ALL selector: - apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + apiVersion: controlplane.cluster.x-k8s.io/v1beta2 kind: KubeadmControlPlaneTemplate matchResources: controlPlane: true @@ -84,7 +84,7 @@ spec: - '{{ .sshKey }}' sudo: ALL=(ALL) NOPASSWD:ALL selector: - apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 + apiVersion: bootstrap.cluster.x-k8s.io/v1beta2 kind: KubeadmConfigTemplate matchResources: machineDeploymentClass: @@ -190,41 +190,41 @@ spec: path: /etc/pre-kubeadm-commands/50-kube-vip-prepare.sh permissions: "0700" selector: - apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + apiVersion: controlplane.cluster.x-k8s.io/v1beta2 kind: KubeadmControlPlaneTemplate matchResources: controlPlane: true name: kubeVipPodManifest variables: - - metadata: {} + - deprecatedV1Beta1Metadata: {} name: sshKey required: false schema: openAPIV3Schema: description: Public key to SSH onto the cluster nodes. type: string - - metadata: {} + - deprecatedV1Beta1Metadata: {} name: controlPlaneIpAddr required: true schema: openAPIV3Schema: description: Floating VIP for the control plane. type: string - - metadata: {} + - deprecatedV1Beta1Metadata: {} name: controlPlanePort required: true schema: openAPIV3Schema: description: Port for the control plane endpoint. type: integer - - metadata: {} + - deprecatedV1Beta1Metadata: {} name: kubeVipPodManifest required: true schema: openAPIV3Schema: description: kube-vip manifest for the control plane. type: string - - metadata: {} + - deprecatedV1Beta1Metadata: {} name: infraServer required: true schema: @@ -235,7 +235,7 @@ spec: url: type: string type: object - - metadata: {} + - deprecatedV1Beta1Metadata: {} name: credsSecretName required: true schema: @@ -248,7 +248,7 @@ spec: template: bootstrap: ref: - apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 + apiVersion: bootstrap.cluster.x-k8s.io/v1beta2 kind: KubeadmConfigTemplate name: ${CLUSTER_CLASS_NAME}-worker-bootstrap-template namespace: '${NAMESPACE}' @@ -312,7 +312,7 @@ spec: storagePolicyName: '${VSPHERE_STORAGE_POLICY}' template: '${VSPHERE_TEMPLATE}' --- -apiVersion: controlplane.cluster.x-k8s.io/v1beta1 +apiVersion: controlplane.cluster.x-k8s.io/v1beta2 kind: KubeadmControlPlaneTemplate metadata: name: ${CLUSTER_CLASS_NAME}-controlplane @@ -352,7 +352,7 @@ spec: - '${VSPHERE_SSH_AUTHORIZED_KEY}' sudo: ALL=(ALL) NOPASSWD:ALL --- -apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 +apiVersion: bootstrap.cluster.x-k8s.io/v1beta2 kind: KubeadmConfigTemplate metadata: name: ${CLUSTER_CLASS_NAME}-worker-bootstrap-template diff --git a/test/e2e/anti_affinity_test.go b/test/e2e/anti_affinity_test.go index 8e1cb5817e..57edb3a563 100644 --- a/test/e2e/anti_affinity_test.go +++ b/test/e2e/anti_affinity_test.go @@ -27,7 +27,7 @@ import ( "github.com/vmware/govmomi/find" "github.com/vmware/govmomi/vim25/mo" corev1 "k8s.io/api/core/v1" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/cluster-api/test/framework" "sigs.k8s.io/cluster-api/test/framework/clusterctl" . 
"sigs.k8s.io/cluster-api/test/framework/ginkgoextensions" diff --git a/test/e2e/clusterclass_rollout_test.go b/test/e2e/clusterclass_rollout_test.go index f9f1d7bbec..f997191d5a 100644 --- a/test/e2e/clusterclass_rollout_test.go +++ b/test/e2e/clusterclass_rollout_test.go @@ -19,7 +19,7 @@ package e2e import ( . "github.com/onsi/ginkgo/v2" "k8s.io/utils/ptr" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" capie2e "sigs.k8s.io/cluster-api/test/e2e" "sigs.k8s.io/controller-runtime/pkg/client" diff --git a/test/e2e/clusterctl_upgrade_test.go b/test/e2e/clusterctl_upgrade_test.go index 2340712901..495932d524 100644 --- a/test/e2e/clusterctl_upgrade_test.go +++ b/test/e2e/clusterctl_upgrade_test.go @@ -24,7 +24,8 @@ import ( . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" clusterctlcluster "sigs.k8s.io/cluster-api/cmd/clusterctl/client/cluster" capi_e2e "sigs.k8s.io/cluster-api/test/e2e" "sigs.k8s.io/cluster-api/test/framework" @@ -268,10 +269,14 @@ func getStableReleaseOfMinor(ctx context.Context, releaseMarkerPrefix, minorRele } func kindManagementClusterNewClusterProxyFunc(name string, kubeconfigPath string) framework.ClusterProxy { + scheme := initScheme() + // The scheme for v1beta1 is still required as long as we upgrade from v1beta1 Cluster's. + _ = clusterv1beta1.AddToScheme(scheme) + if testTarget == VCSimTestTarget { - return vcsim.NewClusterProxy(name, kubeconfigPath, initScheme()) + return vcsim.NewClusterProxy(name, kubeconfigPath, scheme) } - return framework.NewClusterProxy(name, kubeconfigPath, initScheme()) + return framework.NewClusterProxy(name, kubeconfigPath, scheme) } func crdShouldBeMigrated(crd apiextensionsv1.CustomResourceDefinition) bool { diff --git a/test/e2e/config/vsphere.yaml b/test/e2e/config/vsphere.yaml index 2a0b1a317b..071a83ce6f 100644 --- a/test/e2e/config/vsphere.yaml +++ b/test/e2e/config/vsphere.yaml @@ -26,6 +26,15 @@ providers: - name: cluster-api type: CoreProvider versions: + - name: "v1.11.99" + value: "https://storage.googleapis.com/k8s-staging-cluster-api/components/nightly_main_20250527/core-components.yaml" + type: "url" + contract: v1beta2 + files: + - sourcePath: "../data/shared/capi/v1.11/metadata.yaml" + replacements: + - old: "imagePullPolicy: Always" + new: "imagePullPolicy: IfNotPresent" - name: "{go://sigs.k8s.io/cluster-api@v1.10}" value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/{go://sigs.k8s.io/cluster-api@v1.10}/core-components.yaml" type: "url" @@ -57,6 +66,15 @@ providers: - name: kubeadm type: BootstrapProvider versions: + - name: "v1.11.99" + value: "https://storage.googleapis.com/k8s-staging-cluster-api/components/nightly_main_20250527/bootstrap-components.yaml" + type: "url" + contract: v1beta2 + files: + - sourcePath: "../data/shared/capi/v1.11/metadata.yaml" + replacements: + - old: "imagePullPolicy: Always" + new: "imagePullPolicy: IfNotPresent" - name: "{go://sigs.k8s.io/cluster-api@v1.10}" value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/{go://sigs.k8s.io/cluster-api@v1.10}/bootstrap-components.yaml" type: "url" @@ -88,6 +106,15 @@ providers: - name: kubeadm type: ControlPlaneProvider versions: + - name: "v1.11.99" + value: 
"https://storage.googleapis.com/k8s-staging-cluster-api/components/nightly_main_20250527/control-plane-components.yaml" + type: "url" + contract: v1beta2 + files: + - sourcePath: "../data/shared/capi/v1.11/metadata.yaml" + replacements: + - old: "imagePullPolicy: Always" + new: "imagePullPolicy: IfNotPresent" - name: "{go://sigs.k8s.io/cluster-api@v1.10}" value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/{go://sigs.k8s.io/cluster-api@v1.10}/control-plane-components.yaml" type: "url" diff --git a/test/e2e/data/infrastructure-vsphere-govmomi/main/clusterclass/patch-k8s-install-script.yaml b/test/e2e/data/infrastructure-vsphere-govmomi/main/clusterclass/patch-k8s-install-script.yaml index 51d92c895f..b996cf5d02 100644 --- a/test/e2e/data/infrastructure-vsphere-govmomi/main/clusterclass/patch-k8s-install-script.yaml +++ b/test/e2e/data/infrastructure-vsphere-govmomi/main/clusterclass/patch-k8s-install-script.yaml @@ -105,7 +105,7 @@ echo "$${LINE_SEPARATOR}" selector: - apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + apiVersion: controlplane.cluster.x-k8s.io/v1beta2 kind: KubeadmControlPlaneTemplate matchResources: controlPlane: true @@ -212,7 +212,7 @@ echo "$${LINE_SEPARATOR}" selector: - apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 + apiVersion: bootstrap.cluster.x-k8s.io/v1beta2 kind: KubeadmConfigTemplate matchResources: machineDeploymentClass: diff --git a/test/e2e/data/infrastructure-vsphere-govmomi/main/clusterclass/patch-prekubeadmscript.yaml b/test/e2e/data/infrastructure-vsphere-govmomi/main/clusterclass/patch-prekubeadmscript.yaml index 3345f5b6d2..5e298615bb 100644 --- a/test/e2e/data/infrastructure-vsphere-govmomi/main/clusterclass/patch-prekubeadmscript.yaml +++ b/test/e2e/data/infrastructure-vsphere-govmomi/main/clusterclass/patch-prekubeadmscript.yaml @@ -12,7 +12,7 @@ permissions: "0755" content: {{ printf "%q" (regexReplaceAll "(KUBERNETES_VERSION=.*)" .preKubeadmScript (printf "KUBERNETES_VERSION=%s" .builtin.controlPlane.version)) }} selector: - apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + apiVersion: controlplane.cluster.x-k8s.io/v1beta2 kind: KubeadmControlPlaneTemplate matchResources: controlPlane: true @@ -26,7 +26,7 @@ permissions: "0755" content: {{ printf "%q" (regexReplaceAll "(KUBERNETES_VERSION=.*)" .preKubeadmScript (printf "KUBERNETES_VERSION=%s" .builtin.machineDeployment.version)) }} selector: - apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 + apiVersion: bootstrap.cluster.x-k8s.io/v1beta2 kind: KubeadmConfigTemplate matchResources: machineDeploymentClass: diff --git a/test/e2e/data/infrastructure-vsphere-govmomi/main/commons/cluster-network-CIDR.yaml b/test/e2e/data/infrastructure-vsphere-govmomi/main/commons/cluster-network-CIDR.yaml index 24d0253cef..b038666eac 100644 --- a/test/e2e/data/infrastructure-vsphere-govmomi/main/commons/cluster-network-CIDR.yaml +++ b/test/e2e/data/infrastructure-vsphere-govmomi/main/commons/cluster-network-CIDR.yaml @@ -1,4 +1,4 @@ -apiVersion: cluster.x-k8s.io/v1beta1 +apiVersion: cluster.x-k8s.io/v1beta2 kind: Cluster metadata: name: '${CLUSTER_NAME}' diff --git a/test/e2e/data/infrastructure-vsphere-govmomi/main/commons/cluster-resource-set-label.yaml b/test/e2e/data/infrastructure-vsphere-govmomi/main/commons/cluster-resource-set-label.yaml index 1447050b04..a86f9c9a41 100644 --- a/test/e2e/data/infrastructure-vsphere-govmomi/main/commons/cluster-resource-set-label.yaml +++ b/test/e2e/data/infrastructure-vsphere-govmomi/main/commons/cluster-resource-set-label.yaml @@ -1,4 +1,4 @@ 
-apiVersion: cluster.x-k8s.io/v1beta1 +apiVersion: cluster.x-k8s.io/v1beta2 kind: Cluster metadata: name: '${CLUSTER_NAME}' diff --git a/test/e2e/data/infrastructure-vsphere-govmomi/main/commons/cluster-resource-set.yaml b/test/e2e/data/infrastructure-vsphere-govmomi/main/commons/cluster-resource-set.yaml index 6507eed65e..c09b98d965 100644 --- a/test/e2e/data/infrastructure-vsphere-govmomi/main/commons/cluster-resource-set.yaml +++ b/test/e2e/data/infrastructure-vsphere-govmomi/main/commons/cluster-resource-set.yaml @@ -4,7 +4,7 @@ metadata: name: "cni-${CLUSTER_NAME}-crs-cni" data: ${CNI_RESOURCES} --- -apiVersion: addons.cluster.x-k8s.io/v1beta1 +apiVersion: addons.cluster.x-k8s.io/v1beta2 kind: ClusterResourceSet metadata: name: "${CLUSTER_NAME}-crs-cni" diff --git a/test/e2e/data/infrastructure-vsphere-govmomi/main/mhc-remediation/kcp/mhc.yaml b/test/e2e/data/infrastructure-vsphere-govmomi/main/mhc-remediation/kcp/mhc.yaml index b61f434aee..ad047ffd32 100644 --- a/test/e2e/data/infrastructure-vsphere-govmomi/main/mhc-remediation/kcp/mhc.yaml +++ b/test/e2e/data/infrastructure-vsphere-govmomi/main/mhc-remediation/kcp/mhc.yaml @@ -2,7 +2,7 @@ # MachineHealthCheck object with # - a selector that targets all the machines with label cluster.x-k8s.io/control-plane="" # - unhealthyConditions triggering remediation after 10s the condition is set -apiVersion: cluster.x-k8s.io/v1beta1 +apiVersion: cluster.x-k8s.io/v1beta2 kind: MachineHealthCheck metadata: name: "${CLUSTER_NAME}-mhc-kcp" diff --git a/test/e2e/data/infrastructure-vsphere-govmomi/main/mhc-remediation/md/mhc-label.yaml b/test/e2e/data/infrastructure-vsphere-govmomi/main/mhc-remediation/md/mhc-label.yaml index c7e76f66ec..02d11c4921 100644 --- a/test/e2e/data/infrastructure-vsphere-govmomi/main/mhc-remediation/md/mhc-label.yaml +++ b/test/e2e/data/infrastructure-vsphere-govmomi/main/mhc-remediation/md/mhc-label.yaml @@ -1,4 +1,4 @@ -apiVersion: cluster.x-k8s.io/v1beta1 +apiVersion: cluster.x-k8s.io/v1beta2 kind: MachineDeployment metadata: name: '${CLUSTER_NAME}-md-0' diff --git a/test/e2e/data/infrastructure-vsphere-govmomi/main/mhc-remediation/md/mhc.yaml b/test/e2e/data/infrastructure-vsphere-govmomi/main/mhc-remediation/md/mhc.yaml index 51eb077832..bf2861f66d 100644 --- a/test/e2e/data/infrastructure-vsphere-govmomi/main/mhc-remediation/md/mhc.yaml +++ b/test/e2e/data/infrastructure-vsphere-govmomi/main/mhc-remediation/md/mhc.yaml @@ -1,7 +1,7 @@ # MachineHealthCheck object with # - a selector that targets all the machines with label e2e.remediation.label="" # - unhealthyConditions triggering remediation after 10s the condition is set -apiVersion: cluster.x-k8s.io/v1beta1 +apiVersion: cluster.x-k8s.io/v1beta2 kind: MachineHealthCheck metadata: name: "${CLUSTER_NAME}-mhc-md" diff --git a/test/e2e/data/infrastructure-vsphere-govmomi/main/node-drain/kcp-drain.yaml b/test/e2e/data/infrastructure-vsphere-govmomi/main/node-drain/kcp-drain.yaml index b2bf4de523..3d69471e80 100644 --- a/test/e2e/data/infrastructure-vsphere-govmomi/main/node-drain/kcp-drain.yaml +++ b/test/e2e/data/infrastructure-vsphere-govmomi/main/node-drain/kcp-drain.yaml @@ -1,7 +1,7 @@ # KubeadmControlPlane referenced by the Cluster object with # - the label kcp-adoption.step2, because it should be created in the second step of the kcp-adoption test. 
kind: KubeadmControlPlane -apiVersion: controlplane.cluster.x-k8s.io/v1beta1 +apiVersion: controlplane.cluster.x-k8s.io/v1beta2 metadata: name: '${CLUSTER_NAME}' namespace: '${NAMESPACE}' diff --git a/test/e2e/data/infrastructure-vsphere-govmomi/main/node-drain/md-drain.yaml b/test/e2e/data/infrastructure-vsphere-govmomi/main/node-drain/md-drain.yaml index 3dc6058084..c655dbbdde 100644 --- a/test/e2e/data/infrastructure-vsphere-govmomi/main/node-drain/md-drain.yaml +++ b/test/e2e/data/infrastructure-vsphere-govmomi/main/node-drain/md-drain.yaml @@ -1,4 +1,4 @@ -apiVersion: cluster.x-k8s.io/v1beta1 +apiVersion: cluster.x-k8s.io/v1beta2 kind: MachineDeployment metadata: name: "${CLUSTER_NAME}-md-0" diff --git a/test/e2e/data/infrastructure-vsphere-supervisor/main/clusterclass/patch-k8s-install-script.yaml b/test/e2e/data/infrastructure-vsphere-supervisor/main/clusterclass/patch-k8s-install-script.yaml index 51d92c895f..b996cf5d02 100644 --- a/test/e2e/data/infrastructure-vsphere-supervisor/main/clusterclass/patch-k8s-install-script.yaml +++ b/test/e2e/data/infrastructure-vsphere-supervisor/main/clusterclass/patch-k8s-install-script.yaml @@ -105,7 +105,7 @@ echo "$${LINE_SEPARATOR}" selector: - apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + apiVersion: controlplane.cluster.x-k8s.io/v1beta2 kind: KubeadmControlPlaneTemplate matchResources: controlPlane: true @@ -212,7 +212,7 @@ echo "$${LINE_SEPARATOR}" selector: - apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 + apiVersion: bootstrap.cluster.x-k8s.io/v1beta2 kind: KubeadmConfigTemplate matchResources: machineDeploymentClass: diff --git a/test/e2e/data/infrastructure-vsphere-supervisor/main/clusterclass/patch-prekubeadmscript.yaml b/test/e2e/data/infrastructure-vsphere-supervisor/main/clusterclass/patch-prekubeadmscript.yaml index 3345f5b6d2..5e298615bb 100644 --- a/test/e2e/data/infrastructure-vsphere-supervisor/main/clusterclass/patch-prekubeadmscript.yaml +++ b/test/e2e/data/infrastructure-vsphere-supervisor/main/clusterclass/patch-prekubeadmscript.yaml @@ -12,7 +12,7 @@ permissions: "0755" content: {{ printf "%q" (regexReplaceAll "(KUBERNETES_VERSION=.*)" .preKubeadmScript (printf "KUBERNETES_VERSION=%s" .builtin.controlPlane.version)) }} selector: - apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + apiVersion: controlplane.cluster.x-k8s.io/v1beta2 kind: KubeadmControlPlaneTemplate matchResources: controlPlane: true @@ -26,7 +26,7 @@ permissions: "0755" content: {{ printf "%q" (regexReplaceAll "(KUBERNETES_VERSION=.*)" .preKubeadmScript (printf "KUBERNETES_VERSION=%s" .builtin.machineDeployment.version)) }} selector: - apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 + apiVersion: bootstrap.cluster.x-k8s.io/v1beta2 kind: KubeadmConfigTemplate matchResources: machineDeploymentClass: diff --git a/test/e2e/data/infrastructure-vsphere-supervisor/main/commons/cluster-network-CIDR.yaml b/test/e2e/data/infrastructure-vsphere-supervisor/main/commons/cluster-network-CIDR.yaml index 24d0253cef..b038666eac 100644 --- a/test/e2e/data/infrastructure-vsphere-supervisor/main/commons/cluster-network-CIDR.yaml +++ b/test/e2e/data/infrastructure-vsphere-supervisor/main/commons/cluster-network-CIDR.yaml @@ -1,4 +1,4 @@ -apiVersion: cluster.x-k8s.io/v1beta1 +apiVersion: cluster.x-k8s.io/v1beta2 kind: Cluster metadata: name: '${CLUSTER_NAME}' diff --git a/test/e2e/data/infrastructure-vsphere-supervisor/main/commons/cluster-resource-set-label.yaml b/test/e2e/data/infrastructure-vsphere-supervisor/main/commons/cluster-resource-set-label.yaml index 
1447050b04..a86f9c9a41 100644 --- a/test/e2e/data/infrastructure-vsphere-supervisor/main/commons/cluster-resource-set-label.yaml +++ b/test/e2e/data/infrastructure-vsphere-supervisor/main/commons/cluster-resource-set-label.yaml @@ -1,4 +1,4 @@ -apiVersion: cluster.x-k8s.io/v1beta1 +apiVersion: cluster.x-k8s.io/v1beta2 kind: Cluster metadata: name: '${CLUSTER_NAME}' diff --git a/test/e2e/data/infrastructure-vsphere-supervisor/main/commons/cluster-resource-set.yaml b/test/e2e/data/infrastructure-vsphere-supervisor/main/commons/cluster-resource-set.yaml index 6507eed65e..c09b98d965 100644 --- a/test/e2e/data/infrastructure-vsphere-supervisor/main/commons/cluster-resource-set.yaml +++ b/test/e2e/data/infrastructure-vsphere-supervisor/main/commons/cluster-resource-set.yaml @@ -4,7 +4,7 @@ metadata: name: "cni-${CLUSTER_NAME}-crs-cni" data: ${CNI_RESOURCES} --- -apiVersion: addons.cluster.x-k8s.io/v1beta1 +apiVersion: addons.cluster.x-k8s.io/v1beta2 kind: ClusterResourceSet metadata: name: "${CLUSTER_NAME}-crs-cni" diff --git a/test/e2e/data/shared/capi/v1.11/metadata.yaml b/test/e2e/data/shared/capi/v1.11/metadata.yaml new file mode 100644 index 0000000000..4d94191c95 --- /dev/null +++ b/test/e2e/data/shared/capi/v1.11/metadata.yaml @@ -0,0 +1,44 @@ +# maps release series of major.minor to cluster-api contract version +# the contract version may change between minor or major versions, but *not* +# between patch versions. +# +# update this file only when a new major or minor version is released +apiVersion: clusterctl.cluster.x-k8s.io/v1alpha3 +kind: Metadata +releaseSeries: + - major: 1 + minor: 11 + contract: v1beta2 + - major: 1 + minor: 10 + contract: v1beta1 + - major: 1 + minor: 9 + contract: v1beta1 + - major: 1 + minor: 8 + contract: v1beta1 + - major: 1 + minor: 7 + contract: v1beta1 + - major: 1 + minor: 6 + contract: v1beta1 + - major: 1 + minor: 5 + contract: v1beta1 + - major: 1 + minor: 4 + contract: v1beta1 + - major: 1 + minor: 3 + contract: v1beta1 + - major: 1 + minor: 2 + contract: v1beta1 + - major: 1 + minor: 1 + contract: v1beta1 + - major: 1 + minor: 0 + contract: v1beta1 diff --git a/test/e2e/gpu_pci_passthrough_test.go b/test/e2e/gpu_pci_passthrough_test.go index 4d113d653e..2d3db1d9cb 100644 --- a/test/e2e/gpu_pci_passthrough_test.go +++ b/test/e2e/gpu_pci_passthrough_test.go @@ -26,7 +26,7 @@ import ( vim25types "github.com/vmware/govmomi/vim25/types" corev1 "k8s.io/api/core/v1" "k8s.io/utils/ptr" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/cluster-api/test/framework/clusterctl" capiutil "sigs.k8s.io/cluster-api/util" ) diff --git a/test/e2e/multivc_test.go b/test/e2e/multivc_test.go index ecea12889e..7850e49abe 100644 --- a/test/e2e/multivc_test.go +++ b/test/e2e/multivc_test.go @@ -26,7 +26,7 @@ import ( . "github.com/onsi/gomega" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/cluster-api/test/framework" "sigs.k8s.io/cluster-api/test/framework/clusterctl" . 
"sigs.k8s.io/cluster-api/test/framework/ginkgoextensions" diff --git a/test/e2e/node_drain_test.go b/test/e2e/node_drain_test.go index 15f85f3c63..ccb3f2258c 100644 --- a/test/e2e/node_drain_test.go +++ b/test/e2e/node_drain_test.go @@ -34,12 +34,12 @@ import ( "k8s.io/client-go/kubernetes" "k8s.io/klog/v2" "k8s.io/utils/ptr" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1" + controlplanev1 "sigs.k8s.io/cluster-api/api/controlplane/kubeadm/v1beta2" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" capi_e2e "sigs.k8s.io/cluster-api/test/e2e" "sigs.k8s.io/cluster-api/test/framework" . "sigs.k8s.io/cluster-api/test/framework/ginkgoextensions" - "sigs.k8s.io/cluster-api/util/patch" + "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/patch" "sigs.k8s.io/controller-runtime/pkg/client" ) diff --git a/test/e2e/ownerrefs_finalizers_test.go b/test/e2e/ownerrefs_finalizers_test.go index 443ca3fea8..a2a2bb0090 100644 --- a/test/e2e/ownerrefs_finalizers_test.go +++ b/test/e2e/ownerrefs_finalizers_test.go @@ -35,15 +35,16 @@ import ( "k8s.io/apimachinery/pkg/util/sets" "k8s.io/klog/v2" "k8s.io/utils/ptr" - addonsv1 "sigs.k8s.io/cluster-api/api/addons/v1beta1" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1" + addonsv1 "sigs.k8s.io/cluster-api/api/addons/v1beta2" + bootstrapv1 "sigs.k8s.io/cluster-api/api/bootstrap/kubeadm/v1beta2" + controlplanev1 "sigs.k8s.io/cluster-api/api/controlplane/kubeadm/v1beta2" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" clusterctlcluster "sigs.k8s.io/cluster-api/cmd/clusterctl/client/cluster" - controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1" capi_e2e "sigs.k8s.io/cluster-api/test/e2e" "sigs.k8s.io/cluster-api/test/framework" "sigs.k8s.io/cluster-api/util/conditions" - "sigs.k8s.io/cluster-api/util/patch" + "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/patch" ctrlclient "sigs.k8s.io/controller-runtime/pkg/client" infrav1 "sigs.k8s.io/cluster-api-provider-vsphere/apis/v1beta1" @@ -151,7 +152,7 @@ var _ = Describe("Ensure OwnerReferences and Finalizers are resilient [vcsim] [s for _, machine := range machineList.Items { if !conditions.IsTrue(&machine, clusterv1.MachineNodeHealthyCondition) { - return errors.Errorf("machine %q does not have %q condition set to true", machine.GetName(), clusterv1.MachineNodeHealthyCondition) + return errors.Errorf("machine %q does not have %q condition set to true", machine.GetName(), clusterv1.MachineNodeHealthyV1Beta1Condition) } } @@ -378,8 +379,8 @@ func checkGovmomiVSphereClusterFailureDomains(ctx context.Context, proxy framewo } Expect(proxy.GetClient().Get(ctx, ctrlclient.ObjectKeyFromObject(vSphereCluster), vSphereCluster)).To(Succeed()) - Expect(vSphereCluster.Status.FailureDomains).To(BeEquivalentTo(clusterv1.FailureDomains{ - "ownerrefs-finalizers": clusterv1.FailureDomainSpec{ + Expect(vSphereCluster.Status.FailureDomains).To(BeEquivalentTo(clusterv1beta1.FailureDomains{ + "ownerrefs-finalizers": clusterv1beta1.FailureDomainSpec{ ControlPlane: true, }, })) @@ -389,9 +390,9 @@ func checkSupervisorVSphereClusterFailureDomains(ctx context.Context, proxy fram avalabilityZones := &topologyv1.AvailabilityZoneList{} Expect(proxy.GetClient().List(ctx, avalabilityZones)).To(Succeed()) - wantFailureDomains := clusterv1.FailureDomains{} + wantFailureDomains := clusterv1beta1.FailureDomains{} for _, 
zone := range avalabilityZones.Items { - wantFailureDomains[zone.Name] = clusterv1.FailureDomainSpec{ + wantFailureDomains[zone.Name] = clusterv1beta1.FailureDomainSpec{ ControlPlane: true, } } diff --git a/test/e2e/storage_policy_test.go b/test/e2e/storage_policy_test.go index 8bec197fc3..d80efb26bd 100644 --- a/test/e2e/storage_policy_test.go +++ b/test/e2e/storage_policy_test.go @@ -27,7 +27,7 @@ import ( pbmypes "github.com/vmware/govmomi/pbm/types" corev1 "k8s.io/api/core/v1" "k8s.io/utils/ptr" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/cluster-api/test/framework/clusterctl" "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/controller-runtime/pkg/client" diff --git a/test/extension/handlers/topologymutation/handler.go b/test/extension/handlers/topologymutation/handler.go index 1067d154e8..824ffdbf1a 100644 --- a/test/extension/handlers/topologymutation/handler.go +++ b/test/extension/handlers/topologymutation/handler.go @@ -31,9 +31,10 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/utils/ptr" - bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1" - controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1" - runtimehooksv1 "sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1" + bootstrapv1 "sigs.k8s.io/cluster-api/api/bootstrap/kubeadm/v1beta2" + controlplanev1 "sigs.k8s.io/cluster-api/api/controlplane/kubeadm/v1beta2" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + runtimehooksv1 "sigs.k8s.io/cluster-api/api/runtime/hooks/v1alpha1" "sigs.k8s.io/cluster-api/exp/runtime/topologymutation" ctrl "sigs.k8s.io/controller-runtime" @@ -49,13 +50,18 @@ import ( // this is convenient because in Cluster API's E2E tests all of them are using a decoder for working with typed // API objects, which makes code easier to read and less error prone than using unstructured or working with raw json/yaml. // NOTE: it is not mandatory to use a ExtensionHandlers in custom RuntimeExtension, what is important -// is to expose HandlerFunc with the signature defined in sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1. +// is to expose HandlerFunc with the signature defined in sigs.k8s.io/cluster-api/api/runtime/hooks/v1alpha1. type ExtensionHandlers struct { decoder runtime.Decoder } // NewExtensionHandlers returns a new ExtensionHandlers for the topology mutation hook handlers. -func NewExtensionHandlers(scheme *runtime.Scheme) *ExtensionHandlers { +func NewExtensionHandlers() *ExtensionHandlers { + scheme := runtime.NewScheme() + _ = infrav1.AddToScheme(scheme) + _ = vmwarev1.AddToScheme(scheme) + _ = bootstrapv1.AddToScheme(scheme) + _ = controlplanev1.AddToScheme(scheme) return &ExtensionHandlers{ // Add the apiGroups being handled to the decoder decoder: serializer.NewCodecFactory(scheme).UniversalDecoder( @@ -122,6 +128,7 @@ func (h *ExtensionHandlers) GeneratePatches(ctx context.Context, req *runtimehoo // an empty template without a set `.spec` and due to omitempty // `.spec.template.spec.controlPlaneEndpoint` does not exist. 
topologymutation.PatchFormat{Format: runtimehooksv1.JSONMergePatchType}, + topologymutation.FailForUnknownTypes{}, ) } @@ -363,7 +370,18 @@ func (h *ExtensionHandlers) DiscoverVariables(ctx context.Context, req *runtimeh log := ctrl.LoggerFrom(ctx) log.Info("DiscoverVariables called") - resp.Status = runtimehooksv1.ResponseStatusSuccess + vars := []clusterv1beta1.ClusterClassVariable{} - resp.Variables = clusterclass.GetClusterClassVariables(req.Settings["testMode"] == "govmomi") + for _, in := range clusterclass.GetClusterClassVariables(req.Settings["testMode"] == "govmomi") { + out := clusterv1beta1.ClusterClassVariable{} + if err := clusterv1beta1.Convert_v1beta2_ClusterClassVariable_To_v1beta1_ClusterClassVariable(&in, &out, nil); err != nil { + resp.Status = runtimehooksv1.ResponseStatusFailure + resp.Message = fmt.Sprintf("Failed to Convert ClusterClass variable %q to v1beta1", in.Name) + return + } + vars = append(vars, out) + } + + resp.Status = runtimehooksv1.ResponseStatusSuccess + resp.Variables = vars } diff --git a/test/extension/main.go b/test/extension/main.go index e81ef22243..af0a434a68 100644 --- a/test/extension/main.go +++ b/test/extension/main.go @@ -39,12 +39,12 @@ import ( logsv1 "k8s.io/component-base/logs/api/v1" _ "k8s.io/component-base/logs/json/register" "k8s.io/klog/v2" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1" + bootstrapv1 "sigs.k8s.io/cluster-api/api/bootstrap/kubeadm/v1beta2" + controlplanev1 "sigs.k8s.io/cluster-api/api/controlplane/kubeadm/v1beta2" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" + runtimehooksv1 "sigs.k8s.io/cluster-api/api/runtime/hooks/v1alpha1" "sigs.k8s.io/cluster-api/controllers/remote" - controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1" runtimecatalog "sigs.k8s.io/cluster-api/exp/runtime/catalog" - runtimehooksv1 "sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1" "sigs.k8s.io/cluster-api/exp/runtime/server" "sigs.k8s.io/cluster-api/test/extension/handlers/lifecycle" "sigs.k8s.io/cluster-api/util/apiwarnings" @@ -286,7 +286,7 @@ func setupTopologyMutationHookHandlers(runtimeExtensionWebhookServer *server.Ser // Create the ExtensionHandlers for the Topology Mutation Hooks. // NOTE: it is not mandatory to group all the ExtensionHandlers using a struct, what is important // is to have HandlerFunc with the signature defined in sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1.
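The DiscoverVariables hunk above down-converts each v1beta2 ClusterClassVariable to its v1beta1 shape before putting it on the response, since the hook response still uses the v1beta1 variable types. The same loop, pulled out as a standalone helper for readability (the helper name is invented; the conversion call is the one the PR already uses):

```go
// Hypothetical helper mirroring the DiscoverVariables change above; not part of this PR.
package topologymutationsketch

import (
	clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2"
)

func convertClusterClassVariables(in []clusterv1.ClusterClassVariable) ([]clusterv1beta1.ClusterClassVariable, error) {
	out := make([]clusterv1beta1.ClusterClassVariable, 0, len(in))
	for i := range in {
		converted := clusterv1beta1.ClusterClassVariable{}
		// Generated conversion function from the deprecated core v1beta1 package.
		if err := clusterv1beta1.Convert_v1beta2_ClusterClassVariable_To_v1beta1_ClusterClassVariable(&in[i], &converted, nil); err != nil {
			return nil, err
		}
		out = append(out, converted)
	}
	return out, nil
}
```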
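On the NewExtensionHandlers change in test/extension/handlers/topologymutation/handler.go above: the handlers now build their own runtime.Scheme rather than receiving one from main.go, so the decoder only knows about the API groups the mutation hook actually patches. A rough sketch of that pattern under the same package aliases (newDecoder and decodeKubeadmConfigTemplate are hypothetical helpers, not code from this PR):

```go
// Sketch of the "handlers own their scheme" pattern: build a scheme for exactly
// the API groups the topology mutation hook rewrites and derive a decoder from it.
package topologymutationsketch

import (
	"errors"

	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/serializer"
	bootstrapv1 "sigs.k8s.io/cluster-api/api/bootstrap/kubeadm/v1beta2"
	controlplanev1 "sigs.k8s.io/cluster-api/api/controlplane/kubeadm/v1beta2"
)

func newDecoder() runtime.Decoder {
	scheme := runtime.NewScheme()
	_ = bootstrapv1.AddToScheme(scheme)
	_ = controlplanev1.AddToScheme(scheme)
	// Only group/versions registered here can be decoded into typed objects.
	return serializer.NewCodecFactory(scheme).UniversalDecoder(
		bootstrapv1.GroupVersion,
		controlplanev1.GroupVersion,
	)
}

func decodeKubeadmConfigTemplate(decoder runtime.Decoder, raw []byte) (*bootstrapv1.KubeadmConfigTemplate, error) {
	obj, err := runtime.Decode(decoder, raw)
	if err != nil {
		return nil, err
	}
	template, ok := obj.(*bootstrapv1.KubeadmConfigTemplate)
	if !ok {
		return nil, errors.New("decoded object is not a KubeadmConfigTemplate")
	}
	return template, nil
}
```

Keeping the scheme private to the handlers also removes the ordering dependency on main.go's scheme setup, which is why the NewExtensionHandlers(scheme) call in test/extension/main.go just below loses its argument.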
- topologyMutationExtensionHandlers := topologymutation.NewExtensionHandlers(scheme) + topologyMutationExtensionHandlers := topologymutation.NewExtensionHandlers() if err := runtimeExtensionWebhookServer.AddExtensionHandler(server.ExtensionHandler{ Hook: runtimehooksv1.GeneratePatches, diff --git a/test/framework/ip/addressmanager.go b/test/framework/ip/addressmanager.go index 88ffa61fb0..d93a7eda24 100644 --- a/test/framework/ip/addressmanager.go +++ b/test/framework/ip/addressmanager.go @@ -23,7 +23,7 @@ import ( "github.com/vmware/govmomi" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" - ipamv1 "sigs.k8s.io/cluster-api/exp/ipam/api/v1alpha1" + ipamv1 "sigs.k8s.io/cluster-api/api/ipam/v1beta2" ) var ipamScheme *runtime.Scheme diff --git a/test/framework/ip/incluster.go b/test/framework/ip/incluster.go index 2cc993b7ef..b53ca8e255 100644 --- a/test/framework/ip/incluster.go +++ b/test/framework/ip/incluster.go @@ -40,7 +40,7 @@ import ( "k8s.io/apimachinery/pkg/util/wait" "k8s.io/klog/v2" "k8s.io/utils/ptr" - ipamv1 "sigs.k8s.io/cluster-api/exp/ipam/api/v1beta1" + ipamv1 "sigs.k8s.io/cluster-api/api/ipam/v1beta2" . "sigs.k8s.io/cluster-api/test/framework/ginkgoextensions" "sigs.k8s.io/controller-runtime/pkg/client" ) diff --git a/test/framework/log/collector.go b/test/framework/log/collector.go index 038c1e85b1..4fea927170 100644 --- a/test/framework/log/collector.go +++ b/test/framework/log/collector.go @@ -35,8 +35,7 @@ import ( "golang.org/x/crypto/ssh" kerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/klog/v2" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/controller-runtime/pkg/client" vmwarev1 "sigs.k8s.io/cluster-api-provider-vsphere/apis/vmware/v1beta1" @@ -53,7 +52,7 @@ type MachineLogCollector struct { Finder *find.Finder } -func (c *MachineLogCollector) CollectMachinePoolLog(_ context.Context, _ client.Client, _ *expv1.MachinePool, _ string) error { +func (c *MachineLogCollector) CollectMachinePoolLog(_ context.Context, _ client.Client, _ *clusterv1.MachinePool, _ string) error { return nil } diff --git a/test/framework/vcsim/cluster_proxy.go b/test/framework/vcsim/cluster_proxy.go index 2d785b29bc..f37b796833 100644 --- a/test/framework/vcsim/cluster_proxy.go +++ b/test/framework/vcsim/cluster_proxy.go @@ -27,8 +27,7 @@ import ( corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/client-go/rest" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/cluster-api/test/framework" inmemoryproxy "sigs.k8s.io/cluster-api/test/infrastructure/inmemory/pkg/server/proxy" "sigs.k8s.io/controller-runtime/pkg/client" @@ -107,7 +106,7 @@ type noopLogCollector struct{} func (*noopLogCollector) CollectMachineLog(_ context.Context, _ client.Client, _ *clusterv1.Machine, _ string) error { return nil } -func (*noopLogCollector) CollectMachinePoolLog(_ context.Context, _ client.Client, _ *expv1.MachinePool, _ string) error { +func (*noopLogCollector) CollectMachinePoolLog(_ context.Context, _ client.Client, _ *clusterv1.MachinePool, _ string) error { return nil } func (*noopLogCollector) CollectInfrastructureLogs(_ context.Context, _ client.Client, _ *clusterv1.Cluster, _ string) error { diff --git a/test/go.mod b/test/go.mod index a4de97a902..612dafe362 100644 --- a/test/go.mod +++ b/test/go.mod @@ -1,10 +1,10 @@ module 
sigs.k8s.io/cluster-api-provider-vsphere/test -go 1.23.0 +go 1.24.0 -replace sigs.k8s.io/cluster-api => sigs.k8s.io/cluster-api v1.10.1 +replace sigs.k8s.io/cluster-api => sigs.k8s.io/cluster-api v1.10.0-beta.0.0.20250527052821-4642e1497262 -replace sigs.k8s.io/cluster-api/test => sigs.k8s.io/cluster-api/test v1.10.1 +replace sigs.k8s.io/cluster-api/test => sigs.k8s.io/cluster-api/test v1.10.0-beta.0.0.20250527052821-4642e1497262 replace sigs.k8s.io/cluster-api-provider-vsphere => ../ @@ -26,35 +26,30 @@ require ( github.com/spf13/pflag v1.0.6 golang.org/x/crypto v0.38.0 gopkg.in/yaml.v2 v2.4.0 - k8s.io/api v0.32.4 - k8s.io/apiextensions-apiserver v0.32.4 - k8s.io/apimachinery v0.32.4 - k8s.io/client-go v0.32.4 - k8s.io/component-base v0.32.4 + k8s.io/api v0.33.1 + k8s.io/apiextensions-apiserver v0.33.1 + k8s.io/apimachinery v0.33.1 + k8s.io/client-go v0.33.1 + k8s.io/component-base v0.33.1 k8s.io/klog/v2 v2.130.1 k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 sigs.k8s.io/cluster-api v0.0.0-00010101000000-000000000000 sigs.k8s.io/cluster-api-provider-vsphere v0.0.0-00010101000000-000000000000 sigs.k8s.io/cluster-api/test v0.0.0-00010101000000-000000000000 - sigs.k8s.io/controller-runtime v0.20.4 + sigs.k8s.io/controller-runtime v0.21.0 sigs.k8s.io/yaml v1.4.0 ) require ( al.essio.dev/pkg/shellescape v1.5.1 // indirect - cel.dev/expr v0.18.0 // indirect - dario.cat/mergo v1.0.1 // indirect + cel.dev/expr v0.19.1 // indirect github.com/BurntSushi/toml v1.4.0 // indirect github.com/MakeNowJust/heredoc v1.0.0 // indirect - github.com/Masterminds/goutils v1.1.1 // indirect - github.com/Masterminds/semver/v3 v3.3.0 // indirect - github.com/Masterminds/sprig/v3 v3.3.0 // indirect github.com/Microsoft/go-winio v0.5.0 // indirect github.com/NYTimes/gziphandler v1.1.1 // indirect github.com/ProtonMail/go-crypto v0.0.0-20230217124315-7d5c6f04bbb8 // indirect github.com/adrg/xdg v0.5.3 // indirect github.com/antlr4-go/antlr/v4 v4.13.0 // indirect - github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/blang/semver v3.5.1+incompatible // indirect github.com/cenkalti/backoff/v4 v4.3.0 // indirect @@ -64,7 +59,7 @@ require ( github.com/coreos/go-systemd/v22 v22.5.0 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/distribution/reference v0.6.0 // indirect - github.com/docker/docker v28.0.2+incompatible // indirect + github.com/docker/docker v28.1.1+incompatible // indirect github.com/docker/go-connections v0.5.0 // indirect github.com/docker/go-units v0.4.0 // indirect github.com/drone/envsubst/v2 v2.0.0-20210730161058-179042472c46 // indirect @@ -86,31 +81,28 @@ require ( github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/protobuf v1.5.4 // indirect github.com/google/btree v1.1.3 // indirect - github.com/google/cel-go v0.22.0 // indirect - github.com/google/gnostic-models v0.6.8 // indirect + github.com/google/cel-go v0.23.2 // indirect + github.com/google/gnostic-models v0.6.9 // indirect github.com/google/go-cmp v0.7.0 // indirect github.com/google/go-github/v53 v53.2.0 // indirect github.com/google/go-querystring v1.1.0 // indirect - github.com/google/gofuzz v1.2.0 // indirect github.com/google/pprof v0.0.0-20250403155104-27863c87afa6 // indirect - github.com/google/safetext v0.0.0-20220905092116-b49f7bc46da2 // indirect github.com/google/uuid v1.6.0 // indirect - github.com/gorilla/websocket v1.5.3 // indirect + github.com/gorilla/websocket 
v1.5.4-0.20250319132907-e064f32e3674 // indirect github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect - github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect - github.com/huandu/xstrings v1.5.0 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect github.com/kr/text v0.2.0 // indirect + github.com/kylelemons/godebug v1.1.0 // indirect github.com/mailru/easyjson v0.7.7 // indirect github.com/mattn/go-colorable v0.1.13 // indirect github.com/mattn/go-isatty v0.0.20 // indirect github.com/mattn/go-runewidth v0.0.14 // indirect - github.com/mitchellh/copystructure v1.2.0 // indirect - github.com/mitchellh/reflectwalk v1.0.2 // indirect github.com/moby/docker-image-spec v1.3.1 // indirect github.com/moby/spdystream v0.5.0 // indirect + github.com/moby/sys/sequential v0.6.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect @@ -120,60 +112,61 @@ require ( github.com/opencontainers/image-spec v1.0.2 // indirect github.com/pelletier/go-toml v1.9.5 // indirect github.com/pelletier/go-toml/v2 v2.2.3 // indirect - github.com/prometheus/client_golang v1.19.1 // indirect + github.com/prometheus/client_golang v1.22.0 // indirect github.com/prometheus/client_model v0.6.1 // indirect - github.com/prometheus/common v0.55.0 // indirect + github.com/prometheus/common v0.62.0 // indirect github.com/prometheus/procfs v0.15.1 // indirect github.com/rivo/uniseg v0.4.2 // indirect github.com/sagikazarmark/locafero v0.7.0 // indirect - github.com/shopspring/decimal v1.4.0 // indirect github.com/sourcegraph/conc v0.3.0 // indirect github.com/spf13/afero v1.12.0 // indirect github.com/spf13/cast v1.7.1 // indirect github.com/spf13/cobra v1.9.1 // indirect - github.com/spf13/viper v1.20.0 // indirect + github.com/spf13/viper v1.20.1 // indirect github.com/stoewer/go-strcase v1.3.0 // indirect github.com/subosito/gotenv v1.6.0 // indirect github.com/valyala/fastjson v1.6.4 // indirect github.com/vmware-tanzu/vm-operator/external/ncp v0.0.0-20240404200847-de75746a9505 // indirect github.com/x448/float16 v0.8.4 // indirect - go.etcd.io/etcd/api/v3 v3.5.20 // indirect - go.etcd.io/etcd/client/pkg/v3 v3.5.20 // indirect - go.etcd.io/etcd/client/v3 v3.5.20 // indirect - go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 // indirect - go.opentelemetry.io/otel v1.29.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0 // indirect - go.opentelemetry.io/otel/metric v1.29.0 // indirect - go.opentelemetry.io/otel/sdk v1.29.0 // indirect - go.opentelemetry.io/otel/trace v1.29.0 // indirect - go.opentelemetry.io/proto/otlp v1.3.1 // indirect + go.etcd.io/etcd/api/v3 v3.5.21 // indirect + go.etcd.io/etcd/client/pkg/v3 v3.5.21 // indirect + go.etcd.io/etcd/client/v3 v3.5.21 // indirect + go.opentelemetry.io/auto/sdk v1.1.0 // indirect + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.58.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 // indirect + go.opentelemetry.io/otel v1.33.0 // indirect + 
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.33.0 // indirect + go.opentelemetry.io/otel/metric v1.33.0 // indirect + go.opentelemetry.io/otel/sdk v1.33.0 // indirect + go.opentelemetry.io/otel/trace v1.33.0 // indirect + go.opentelemetry.io/proto/otlp v1.4.0 // indirect go.uber.org/automaxprocs v1.6.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect golang.org/x/net v0.40.0 // indirect - golang.org/x/oauth2 v0.28.0 // indirect + golang.org/x/oauth2 v0.30.0 // indirect golang.org/x/sync v0.14.0 // indirect golang.org/x/sys v0.33.0 // indirect golang.org/x/term v0.32.0 // indirect golang.org/x/text v0.25.0 // indirect - golang.org/x/time v0.8.0 // indirect + golang.org/x/time v0.9.0 // indirect golang.org/x/tools v0.33.0 // indirect gomodules.xyz/jsonpatch/v2 v2.5.0 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20241223144023-3abc09e42ca8 // indirect - google.golang.org/grpc v1.67.3 // indirect + google.golang.org/grpc v1.68.2 // indirect google.golang.org/protobuf v1.36.5 // indirect gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/apiserver v0.32.4 // indirect - k8s.io/cluster-bootstrap v0.32.4 // indirect - k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f // indirect - sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.0 // indirect + k8s.io/apiserver v0.33.1 // indirect + k8s.io/cluster-bootstrap v0.33.1 // indirect + k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff // indirect + sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.2 // indirect sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect - sigs.k8s.io/kind v0.27.0 // indirect - sigs.k8s.io/structured-merge-diff/v4 v4.4.2 // indirect + sigs.k8s.io/kind v0.29.0 // indirect + sigs.k8s.io/randfill v1.0.0 // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.6.0 // indirect ) diff --git a/test/go.sum b/test/go.sum index fd05263793..6760132ecd 100644 --- a/test/go.sum +++ b/test/go.sum @@ -1,7 +1,7 @@ al.essio.dev/pkg/shellescape v1.5.1 h1:86HrALUujYS/h+GtqoB26SBEdkWfmMI6FubjXlsXyho= al.essio.dev/pkg/shellescape v1.5.1/go.mod h1:6sIqp7X2P6mThCQ7twERpZTuigpr6KbZWtls1U8I890= -cel.dev/expr v0.18.0 h1:CJ6drgk+Hf96lkLikr4rFf19WrU0BOWEihyZnI2TAzo= -cel.dev/expr v0.18.0/go.mod h1:MrpN08Q+lEBs+bGYdLxxHkZoUSsCp0nSKTs0nTymJgw= +cel.dev/expr v0.19.1 h1:NciYrtDRIR0lNCnH1LFJegdjspNx9fI59O7TWcua/W4= +cel.dev/expr v0.19.1/go.mod h1:MrpN08Q+lEBs+bGYdLxxHkZoUSsCp0nSKTs0nTymJgw= dario.cat/mergo v1.0.1 h1:Ra4+bf83h2ztPIQYNP99R6m+Y7KfnARDfID+a+vLl4s= dario.cat/mergo v1.0.1/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0= @@ -31,8 +31,6 @@ github.com/antlr4-go/antlr/v4 v4.13.0 h1:lxCg3LAv+EUK6t1i0y1V6/SLeUi0eKEKdhQAlS8 github.com/antlr4-go/antlr/v4 v4.13.0/go.mod h1:pfChB/xh/Unjila75QW7+VU4TSnWnnk9UTnmpPaOR2g= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= -github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 
h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so= -github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ= @@ -66,8 +64,8 @@ github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= -github.com/docker/docker v28.0.2+incompatible h1:9BILleFwug5FSSqWBgVevgL3ewDJfWWWyZVqlDMttE8= -github.com/docker/docker v28.0.2+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v28.1.1+incompatible h1:49M11BFLsVO1gxY9UX9p/zwkE/rswggs8AdFmXQw51I= +github.com/docker/docker v28.1.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw= @@ -130,8 +128,8 @@ github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5x github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= -github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= +github.com/golang-jwt/jwt/v4 v4.5.2 h1:YtQM7lnr8iZ+j5q71MGKkNw9Mn7AjHM68uc9g5fXeUI= +github.com/golang-jwt/jwt/v4 v4.5.2/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -140,10 +138,10 @@ github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg= github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= -github.com/google/cel-go v0.22.0 h1:b3FJZxpiv1vTMo2/5RDUqAHPxkT8mmMfJIrq1llbf7g= -github.com/google/cel-go v0.22.0/go.mod h1:BuznPXXfQDpXKWQ9sPW3TzlAJN5zzFe+i9tIs0yC4s8= -github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= -github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= +github.com/google/cel-go v0.23.2 h1:UdEe3CvQh3Nv+E/j9r1Y//WO0K0cSyD7/y0bzyLIMI4= +github.com/google/cel-go v0.23.2/go.mod h1:52Pb6QsDbC5kvgxvZhiL9QX1oZEkcUF/ZqaPx1J5Wwo= +github.com/google/gnostic-models v0.6.9 h1:MU/8wDLif2qCXZmzncUQ/BOfxWfthHi63KqpoNbWqVw= 
+github.com/google/gnostic-models v0.6.9/go.mod h1:CiWsm0s6BSQd1hRn8/QmxqB6BesYcbSZxsz9b0KuDBw= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= @@ -160,24 +158,22 @@ github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/pprof v0.0.0-20250403155104-27863c87afa6 h1:BHT72Gu3keYf3ZEu2J0b1vyeLSOYI8bm5wbJM/8yDe8= github.com/google/pprof v0.0.0-20250403155104-27863c87afa6/go.mod h1:boTsfXsheKC2y+lKOCMpSfarhxDeIzfZG1jqGcPl3cA= -github.com/google/safetext v0.0.0-20220905092116-b49f7bc46da2 h1:SJ+NtwL6QaZ21U+IrK7d0gGgpjGGvd2kz+FzTHVzdqI= -github.com/google/safetext v0.0.0-20220905092116-b49f7bc46da2/go.mod h1:Tv1PlzqC9t8wNnpPdctvtSUOPUUg4SHeE6vR1Ir2hmg= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= -github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg= -github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 h1:JeSE6pjso5THxAzdVpqr6/geYxZytqFMBCOtn/ujyeo= +github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674/go.mod h1:r4w70xmWCQKmi1ONH4KIaBptdivuRPyosB9RmPlGEwA= github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw= github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 h1:bkypFPDjIYGfCYD5mRBvpqxfYX1YCS1PXdKYWi8FsN0= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0/go.mod h1:P+Lt/0by1T8bfcF3z737NnSbmxQAppXMRziHUxPOC8k= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0 h1:TmHmbvxPmaegwhDubVz0lICL0J5Ka2vwTzhoePEXsGE= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0/go.mod h1:qztMSjm835F2bXf+5HKAPIS5qsmQDqZna/PgVt4rWtI= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/huandu/xstrings v1.5.0 h1:2ag3IFq9ZDANvthTwTiqSSZLjDc+BedvHPAp5tJy2TI= @@ -195,6 +191,8 @@ github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHm github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/errcheck v1.5.0/go.mod 
h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= +github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= @@ -203,6 +201,8 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= @@ -222,6 +222,10 @@ github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3N github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= github.com/moby/spdystream v0.5.0 h1:7r0J1Si3QO/kjRitvSLVVFUjxMEb/YLj6S9FF62JBCU= github.com/moby/spdystream v0.5.0/go.mod h1:xBAYlnt/ay+11ShkdFKNAG7LsyK/tmNBVvVOwrfMgdI= +github.com/moby/sys/atomicwriter v0.1.0 h1:kw5D/EqkBwsBFi0ss9v1VG3wIkVhzGvLklJ+w3A14Sw= +github.com/moby/sys/atomicwriter v0.1.0/go.mod h1:Ul8oqv2ZMNHOceF643P6FKPXeCmYtlQMvpizfsSoaWs= +github.com/moby/sys/sequential v0.6.0 h1:qrx7XFUd/5DxtqcoH1h438hF5TmOvzC/lspjy7zgvCU= +github.com/moby/sys/sequential v0.6.0/go.mod h1:uyv8EUTrca5PnDsdMGXhZe6CCe8U/UiTWd+lL+7b/Ko= github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0= github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -266,19 +270,19 @@ github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRI github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g= github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U= -github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE= -github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho= +github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q= +github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0= github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= -github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc= -github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8= 
+github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io= +github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I= github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= github.com/rivo/uniseg v0.4.2 h1:YwD0ulJSJytLpiaWua0sBDusfsCZohxjxzVTYjwxfV8= github.com/rivo/uniseg v0.4.2/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= -github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= -github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= +github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= +github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/sagikazarmark/locafero v0.7.0 h1:5MqpDsTGNDhY8sGp0Aowyf0qKsPrhewaLSsFaodPcyo= github.com/sagikazarmark/locafero v0.7.0/go.mod h1:2za3Cg5rMaTMoG/2Ulr9AwtFaIppKXTRYnozin4aB5k= @@ -301,13 +305,15 @@ github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzu github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o= github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/viper v1.20.0 h1:zrxIyR3RQIOsarIrgL8+sAvALXul9jeEPa06Y0Ph6vY= -github.com/spf13/viper v1.20.0/go.mod h1:P9Mdzt1zoHIG8m2eZQinpiBjo6kCmZSKBClNNqjJvu4= +github.com/spf13/viper v1.20.1 h1:ZMi+z/lvLyPSCoNtFCpqjy0S4kPbirhpTMwl8BkW9X4= +github.com/spf13/viper v1.20.1/go.mod h1:P9Mdzt1zoHIG8m2eZQinpiBjo6kCmZSKBClNNqjJvu4= github.com/stoewer/go-strcase v1.3.0 h1:g0eASXYtp+yvN9fK8sH94oCIk0fau9uV1/ZdJ0AVEzs= github.com/stoewer/go-strcase v1.3.0/go.mod h1:fAH5hQ5pehh+j3nZfvwdk2RgEgQjAoM8wodgtPmh1xo= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v0.0.0-20151208002404-e3a8ff8ce365/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= @@ -341,40 +347,42 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= go.etcd.io/bbolt v1.3.11 h1:yGEzV1wPz2yVCLsD8ZAiGHhHVlczyC9d1rP43/VCRJ0= go.etcd.io/bbolt v1.3.11/go.mod h1:dksAq7YMXoljX0xu6VF5DMZGbhYYoLUalEiSySYAS4I= -go.etcd.io/etcd/api/v3 v3.5.20 h1:aKfz3nPZECWoZJXMSH9y6h2adXjtOHaHTGEVCuCmaz0= -go.etcd.io/etcd/api/v3 v3.5.20/go.mod h1:QqKGViq4KTgOG43dr/uH0vmGWIaoJY3ggFi6ZH0TH/U= -go.etcd.io/etcd/client/pkg/v3 v3.5.20 h1:sZIAtra+xCo56gdf6BR62to/hiie5Bwl7hQIqMzVTEM= -go.etcd.io/etcd/client/pkg/v3 v3.5.20/go.mod h1:qaOi1k4ZA9lVLejXNvyPABrVEe7VymMF2433yyRQ7O0= 
-go.etcd.io/etcd/client/v2 v2.305.16 h1:kQrn9o5czVNaukf2A2At43cE9ZtWauOtf9vRZuiKXow= -go.etcd.io/etcd/client/v2 v2.305.16/go.mod h1:h9YxWCzcdvZENbfzBTFCnoNumr2ax3F19sKMqHFmXHE= -go.etcd.io/etcd/client/v3 v3.5.20 h1:jMT2MwQEhyvhQg49Cec+1ZHJzfUf6ZgcmV0GjPv0tIQ= -go.etcd.io/etcd/client/v3 v3.5.20/go.mod h1:J5lbzYRMUR20YolS5UjlqqMcu3/wdEvG5VNBhzyo3m0= -go.etcd.io/etcd/pkg/v3 v3.5.16 h1:cnavs5WSPWeK4TYwPYfmcr3Joz9BH+TZ6qoUtz6/+mc= -go.etcd.io/etcd/pkg/v3 v3.5.16/go.mod h1:+lutCZHG5MBBFI/U4eYT5yL7sJfnexsoM20Y0t2uNuY= -go.etcd.io/etcd/raft/v3 v3.5.16 h1:zBXA3ZUpYs1AwiLGPafYAKKl/CORn/uaxYDwlNwndAk= -go.etcd.io/etcd/raft/v3 v3.5.16/go.mod h1:P4UP14AxofMJ/54boWilabqqWoW9eLodl6I5GdGzazI= -go.etcd.io/etcd/server/v3 v3.5.16 h1:d0/SAdJ3vVsZvF8IFVb1k8zqMZ+heGcNfft71ul9GWE= -go.etcd.io/etcd/server/v3 v3.5.16/go.mod h1:ynhyZZpdDp1Gq49jkUg5mfkDWZwXnn3eIqCqtJnrD/s= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0 h1:r6I7RJCN86bpD/FQwedZ0vSixDpwuWREjW9oRMsmqDc= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0/go.mod h1:B9yO6b04uB80CzjedvewuqDhxJxi11s7/GtiGa8bAjI= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 h1:TT4fX+nBOA/+LUkobKGW1ydGcn+G3vRw9+g5HwCphpk= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0/go.mod h1:L7UH0GbB0p47T4Rri3uHjbpCFYrVrwc1I25QhNPiGK8= -go.opentelemetry.io/otel v1.29.0 h1:PdomN/Al4q/lN6iBJEN3AwPvUiHPMlt93c8bqTG5Llw= -go.opentelemetry.io/otel v1.29.0/go.mod h1:N/WtXPs1CNCUEx+Agz5uouwCba+i+bJGFicT8SR4NP8= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 h1:3Q/xZUyC1BBkualc9ROb4G8qkH90LXEIICcs5zv1OYY= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0/go.mod h1:s75jGIWA9OfCMzF0xr+ZgfrB5FEbbV7UuYo32ahUiFI= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0 h1:qFffATk0X+HD+f1Z8lswGiOQYKHRlzfmdJm0wEaVrFA= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0/go.mod h1:MOiCmryaYtc+V0Ei+Tx9o5S1ZjA7kzLucuVuyzBZloQ= +go.etcd.io/etcd/api/v3 v3.5.21 h1:A6O2/JDb3tvHhiIz3xf9nJ7REHvtEFJJ3veW3FbCnS8= +go.etcd.io/etcd/api/v3 v3.5.21/go.mod h1:c3aH5wcvXv/9dqIw2Y810LDXJfhSYdHQ0vxmP3CCHVY= +go.etcd.io/etcd/client/pkg/v3 v3.5.21 h1:lPBu71Y7osQmzlflM9OfeIV2JlmpBjqBNlLtcoBqUTc= +go.etcd.io/etcd/client/pkg/v3 v3.5.21/go.mod h1:BgqT/IXPjK9NkeSDjbzwsHySX3yIle2+ndz28nVsjUs= +go.etcd.io/etcd/client/v2 v2.305.21 h1:eLiFfexc2mE+pTLz9WwnoEsX5JTTpLCYVivKkmVXIRA= +go.etcd.io/etcd/client/v2 v2.305.21/go.mod h1:OKkn4hlYNf43hpjEM3Ke3aRdUkhSl8xjKjSf8eCq2J8= +go.etcd.io/etcd/client/v3 v3.5.21 h1:T6b1Ow6fNjOLOtM0xSoKNQt1ASPCLWrF9XMHcH9pEyY= +go.etcd.io/etcd/client/v3 v3.5.21/go.mod h1:mFYy67IOqmbRf/kRUvsHixzo3iG+1OF2W2+jVIQRAnU= +go.etcd.io/etcd/pkg/v3 v3.5.21 h1:jUItxeKyrDuVuWhdh0HtjUANwyuzcb7/FAeUfABmQsk= +go.etcd.io/etcd/pkg/v3 v3.5.21/go.mod h1:wpZx8Egv1g4y+N7JAsqi2zoUiBIUWznLjqJbylDjWgU= +go.etcd.io/etcd/raft/v3 v3.5.21 h1:dOmE0mT55dIUsX77TKBLq+RgyumsQuYeiRQnW/ylugk= +go.etcd.io/etcd/raft/v3 v3.5.21/go.mod h1:fmcuY5R2SNkklU4+fKVBQi2biVp5vafMrWUEj4TJ4Cs= +go.etcd.io/etcd/server/v3 v3.5.21 h1:9w0/k12majtgarGmlMVuhwXRI2ob3/d1Ik3X5TKo0yU= +go.etcd.io/etcd/server/v3 v3.5.21/go.mod h1:G1mOzdwuzKT1VRL7SqRchli/qcFrtLBTAQ4lV20sXXo= +go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= +go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.58.0 h1:PS8wXpbyaDJQ2VDHHncMe9Vct0Zn1fEjpsjrLxGJoSc= 
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.58.0/go.mod h1:HDBUsEjOuRC0EzKZ1bSaRGZWUBAzo+MhAcUUORSr4D0= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 h1:yd02MEjBdJkG3uabWP9apV+OuWRIXGDuJEUJbOHmCFU= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0/go.mod h1:umTcuxiv1n/s/S6/c2AT/g2CQ7u5C59sHDNmfSwgz7Q= +go.opentelemetry.io/otel v1.33.0 h1:/FerN9bax5LoK51X/sI0SVYrjSE0/yUL7DpxW4K3FWw= +go.opentelemetry.io/otel v1.33.0/go.mod h1:SUUkR6csvUQl+yjReHu5uM3EtVV7MBm5FHKRlNx4I8I= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0 h1:Vh5HayB/0HHfOQA7Ctx69E/Y/DcQSMPpKANYVMQ7fBA= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0/go.mod h1:cpgtDBaqD/6ok/UG0jT15/uKjAY8mRA53diogHBg3UI= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.33.0 h1:5pojmb1U1AogINhN3SurB+zm/nIcusopeBNp42f45QM= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.33.0/go.mod h1:57gTHJSE5S1tqg+EKsLPlTWhpHMsWlVmer+LA926XiA= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.22.0 h1:FyjCyI9jVEfqhUh2MoSkmolPjfh5fp2hnV0b0irxH4Q= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.22.0/go.mod h1:hYwym2nDEeZfG/motx0p7L7J1N1vyzIThemQsb4g2qY= -go.opentelemetry.io/otel/metric v1.29.0 h1:vPf/HFWTNkPu1aYeIsc98l4ktOQaL6LeSoeV2g+8YLc= -go.opentelemetry.io/otel/metric v1.29.0/go.mod h1:auu/QWieFVWx+DmQOUMgj0F8LHWdgalxXqvp7BII/W8= -go.opentelemetry.io/otel/sdk v1.29.0 h1:vkqKjk7gwhS8VaWb0POZKmIEDimRCMsopNYnriHyryo= -go.opentelemetry.io/otel/sdk v1.29.0/go.mod h1:pM8Dx5WKnvxLCb+8lG1PRNIDxu9g9b9g59Qr7hfAAok= -go.opentelemetry.io/otel/trace v1.29.0 h1:J/8ZNK4XgR7a21DZUAsbF8pZ5Jcw1VhACmnYt39JTi4= -go.opentelemetry.io/otel/trace v1.29.0/go.mod h1:eHl3w0sp3paPkYstJOmAimxhiFXPg+MMTlEh3nsQgWQ= -go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0= -go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8= +go.opentelemetry.io/otel/metric v1.33.0 h1:r+JOocAyeRVXD8lZpjdQjzMadVZp2M4WmQ+5WtEnklQ= +go.opentelemetry.io/otel/metric v1.33.0/go.mod h1:L9+Fyctbp6HFTddIxClbQkjtubW6O9QS3Ann/M82u6M= +go.opentelemetry.io/otel/sdk v1.33.0 h1:iax7M131HuAm9QkZotNHEfstof92xM+N8sr3uHXc2IM= +go.opentelemetry.io/otel/sdk v1.33.0/go.mod h1:A1Q5oi7/9XaMlIWzPSxLRWOI8nG3FnzHJNbiENQuihM= +go.opentelemetry.io/otel/trace v1.33.0 h1:cCJuF7LRjUFso9LPnEAHJDB2pqzp+hbO8eu1qqW2d/s= +go.opentelemetry.io/otel/trace v1.33.0/go.mod h1:uIcdVUZMpTAmz0tI1z04GoVSezK37CbGV4fr1f2nBck= +go.opentelemetry.io/proto/otlp v1.4.0 h1:TA9WRvW6zMwP+Ssb6fLoUIuirti1gGbP28GcKG1jgeg= +go.opentelemetry.io/proto/otlp v1.4.0/go.mod h1:PPBWZIP98o2ElSqI35IHfu7hIhSwvc5N38Jw8pXuGFY= go.uber.org/automaxprocs v1.6.0 h1:O3y2/QNTOdbF+e/dpXNNW7Rx2hZ4sTIPyybbxyNqTUs= go.uber.org/automaxprocs v1.6.0/go.mod h1:ifeIMSnPZuznNm6jmdzmU3/bfk01Fe2fotchwEFJ8r8= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= @@ -403,8 +411,8 @@ golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwY golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.40.0 h1:79Xs7wF06Gbdcg4kdCCIQArK11Z1hr5POQ6+fIYHNuY= golang.org/x/net v0.40.0/go.mod h1:y0hY0exeL2Pku80/zKK7tpntoX23cqL3Oa6njdgRtds= -golang.org/x/oauth2 v0.28.0 h1:CrgCKl8PPAVtLnU3c+EDw6x11699EWlsDeWNWKdIOkc= -golang.org/x/oauth2 v0.28.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8= +golang.org/x/oauth2 v0.30.0 
h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI= +golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -435,8 +443,8 @@ golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.25.0 h1:qVyWApTSYLk/drJRO5mDlNYskwQznZmkpV2c8q9zls4= golang.org/x/text v0.25.0/go.mod h1:WEdwpYrmk1qmdHvhkSTNPm3app7v4rsT8F2UD6+VHIA= -golang.org/x/time v0.8.0 h1:9i3RxcPv3PZnitoVGMPDKZSq1xW1gK1Xy3ArNOGZfEg= -golang.org/x/time v0.8.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/time v0.9.0 h1:EsRrnYcQiGH+5FfbgvV4AP7qEZstoyrHB0DzarOQ4ZY= +golang.org/x/time v0.9.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -457,8 +465,8 @@ google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576 h1: google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576/go.mod h1:1R3kvZ1dtP3+4p4d3G8uJ8rFk/fWlScl38vanWACI08= google.golang.org/genproto/googleapis/rpc v0.0.0-20241223144023-3abc09e42ca8 h1:TqExAhdPaB60Ux47Cn0oLV07rGnxZzIsaRhQaqS666A= google.golang.org/genproto/googleapis/rpc v0.0.0-20241223144023-3abc09e42ca8/go.mod h1:lcTa1sDdWEIHMWlITnIczmw5w60CF9ffkb8Z+DVmmjA= -google.golang.org/grpc v1.67.3 h1:OgPcDAFKHnH8X3O4WcO4XUc8GRDeKsKReqbQtiCj7N8= -google.golang.org/grpc v1.67.3/go.mod h1:YGaHCc6Oap+FzBJTZLBzkGSYt/cvGPFTPxkn7QfSU8s= +google.golang.org/grpc v1.68.2 h1:EWN8x60kqfCcBXzbfPpEezgdYRZA9JCxtySmCtTUs2E= +google.golang.org/grpc v1.68.2/go.mod h1:AOXp0/Lj+nW5pJEgw8KQ6L1Ka+NTyJOABlSgfCrCN5A= google.golang.org/protobuf v1.36.5 h1:tPhr+woSbjfYvY6/GPufUoYizxw1cF/yFoxJ2fmpwlM= google.golang.org/protobuf v1.36.5/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -488,46 +496,49 @@ gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools/v3 v3.4.0 h1:ZazjZUfuVeZGLAmlKKuyv3IKP5orXcwtOwDQH6YVr6o= gotest.tools/v3 v3.4.0/go.mod h1:CtbdzLSsqVhDgMtKsx03ird5YTGB3ar27v0u/yKBW5g= -k8s.io/api v0.32.4 h1:kw8Y/G8E7EpNy7gjB8gJZl3KJkNz8HM2YHrZPtAZsF4= -k8s.io/api v0.32.4/go.mod h1:5MYFvLvweRhyKylM3Es/6uh/5hGp0dg82vP34KifX4g= -k8s.io/apiextensions-apiserver v0.32.4 h1:IA+CoR63UDOijR/vEpow6wQnX4V6iVpzazJBskHrpHE= -k8s.io/apiextensions-apiserver v0.32.4/go.mod h1:Y06XO/b92H8ymOdG1HlA1submf7gIhbEDc3RjriqZOs= +k8s.io/api v0.33.1 h1:tA6Cf3bHnLIrUK4IqEgb2v++/GYUtqiu9sRVk3iBXyw= +k8s.io/api v0.33.1/go.mod h1:87esjTn9DRSRTD4fWMXamiXxJhpOIREjWOSjsW1kEHw= +k8s.io/apiextensions-apiserver v0.33.1 h1:N7ccbSlRN6I2QBcXevB73PixX2dQNIW0ZRuguEE91zI= +k8s.io/apiextensions-apiserver v0.33.1/go.mod h1:uNQ52z1A1Gu75QSa+pFK5bcXc4hq7lpOXbweZgi4dqA= k8s.io/apimachinery v0.17.4/go.mod 
h1:gxLnyZcGNdZTCLnq3fgzyg2A5BVCHTNDFrw8AmuJ+0g= -k8s.io/apimachinery v0.32.4 h1:8EEksaxA7nd7xWJkkwLDN4SvWS5ot9g6Z/VZb3ju25I= -k8s.io/apimachinery v0.32.4/go.mod h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE= -k8s.io/apiserver v0.32.4 h1:Yf7sd/y+GOQKH1Qf6wUeayZrYXe2SKZ17Bcq7VQM5HQ= -k8s.io/apiserver v0.32.4/go.mod h1:JFUMNtE2M5yqLZpIsgCb06SkVSW1YcxW1oyLSTfjXR8= -k8s.io/client-go v0.32.4 h1:zaGJS7xoYOYumoWIFXlcVrsiYioRPrXGO7dBfVC5R6M= -k8s.io/client-go v0.32.4/go.mod h1:k0jftcyYnEtwlFW92xC7MTtFv5BNcZBr+zn9jPlT9Ic= -k8s.io/cluster-bootstrap v0.32.4 h1:QO2rZ1KDLHaa3WKgpF3P26/5AKLFViMt0jUJptylCgs= -k8s.io/cluster-bootstrap v0.32.4/go.mod h1:+O5BK2t/VxGXcPPOn+SlpFrC0x78nnW6jnPI2MRhdz8= -k8s.io/component-base v0.32.4 h1:HuF+2JVLbFS5GODLIfPCb1Td6b+G2HszJoArcWOSr5I= -k8s.io/component-base v0.32.4/go.mod h1:10KloJEYw1keU/Xmjfy9TKJqUq7J2mYdiD1VDXoco4o= +k8s.io/apimachinery v0.33.1 h1:mzqXWV8tW9Rw4VeW9rEkqvnxj59k1ezDUl20tFK/oM4= +k8s.io/apimachinery v0.33.1/go.mod h1:BHW0YOu7n22fFv/JkYOEfkUYNRN0fj0BlvMFWA7b+SM= +k8s.io/apiserver v0.33.1 h1:yLgLUPDVC6tHbNcw5uE9mo1T6ELhJj7B0geifra3Qdo= +k8s.io/apiserver v0.33.1/go.mod h1:VMbE4ArWYLO01omz+k8hFjAdYfc3GVAYPrhP2tTKccs= +k8s.io/client-go v0.33.1 h1:ZZV/Ks2g92cyxWkRRnfUDsnhNn28eFpt26aGc8KbXF4= +k8s.io/client-go v0.33.1/go.mod h1:JAsUrl1ArO7uRVFWfcj6kOomSlCv+JpvIsp6usAGefA= +k8s.io/cluster-bootstrap v0.33.1 h1:esGY+qXFJ78myppBzMVqqj37ReGLOJpQNslRiqmQGes= +k8s.io/cluster-bootstrap v0.33.1/go.mod h1:YA4FsgPShsVoP84DkBJEkCKDgsH4PpgTa0NzNBf6y4I= +k8s.io/component-base v0.33.1 h1:EoJ0xA+wr77T+G8p6T3l4efT2oNwbqBVKR71E0tBIaI= +k8s.io/component-base v0.33.1/go.mod h1:guT/w/6piyPfTgq7gfvgetyXMIh10zuXA6cRRm3rDuY= k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E= -k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f h1:GA7//TjRY9yWGy1poLzYYJJ4JRdzg3+O6e8I+e+8T5Y= -k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f/go.mod h1:R/HEjbvWI0qdfb8viZUeVZm0X6IZnxAydC7YU42CMw4= +k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff h1:/usPimJzUKKu+m+TE36gUyGcf03XZEP0ZIKgKj35LS4= +k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff/go.mod h1:5jIi+8yX4RIb8wk3XwBo5Pq2ccx4FP10ohkbSKCZoK8= k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 h1:M3sRQVHv7vB20Xc2ybTt7ODCeFj6JSWYFzOFnYeS6Ro= k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.0 h1:CPT0ExVicCzcpeN4baWEV2ko2Z/AsiZgEdwgcfwLgMo= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.0/go.mod h1:Ve9uj1L+deCXFrPOk1LpFXqTg7LCFzFso6PA48q/XZw= -sigs.k8s.io/cluster-api v1.10.1 h1:5vsLNgQ4SkPudJ1USK532B0SIdJxRsCNKt2DZtBf+ww= -sigs.k8s.io/cluster-api v1.10.1/go.mod h1:aiPMrNPoaJc/GuJ4TCpWX8bVe11+iCJ4HI0f3c9QiJg= -sigs.k8s.io/cluster-api/test v1.10.1 h1:cSgbfROhT42+M1YQg6Wj5DQWNjRZtkFQGIDSu6JJjmk= -sigs.k8s.io/cluster-api/test v1.10.1/go.mod h1:CYFajf4HhV5zhoiPl+m5Omk0a3WtnyXHtFVPqWIcUy4= -sigs.k8s.io/controller-runtime v0.20.4 h1:X3c+Odnxz+iPTRobG4tp092+CvBU9UK0t/bRf+n0DGU= -sigs.k8s.io/controller-runtime v0.20.4/go.mod 
h1:xg2XB0K5ShQzAgsoujxuKN4LNXR2LfwwHsPj7Iaw+XY= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.2 h1:jpcvIRr3GLoUoEKRkHKSmGjxb6lWwrBlJsXc+eUYQHM= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.2/go.mod h1:Ve9uj1L+deCXFrPOk1LpFXqTg7LCFzFso6PA48q/XZw= +sigs.k8s.io/cluster-api v1.10.0-beta.0.0.20250527052821-4642e1497262 h1:GcAHcaAycWzdtTRwW/rcQwcABVoCxyh+lJ6l2paqMoU= +sigs.k8s.io/cluster-api v1.10.0-beta.0.0.20250527052821-4642e1497262/go.mod h1:UmipfrOBTqjRNX7X4zuJCInq28/Fh6xq9RklOJ/DMR4= +sigs.k8s.io/cluster-api/test v1.10.0-beta.0.0.20250527052821-4642e1497262 h1:DCj/L/gmmi6q0pB8LY5k+2Yw69/T3CnZxs9Wn5k3vaY= +sigs.k8s.io/cluster-api/test v1.10.0-beta.0.0.20250527052821-4642e1497262/go.mod h1:jXu1hsMVlAHHBtKbRwxtBEkUR0q+f5/eiSNOXYHR1Lk= +sigs.k8s.io/controller-runtime v0.21.0 h1:CYfjpEuicjUecRk+KAeyYh+ouUBn4llGyDYytIGcJS8= +sigs.k8s.io/controller-runtime v0.21.0/go.mod h1:OSg14+F65eWqIu4DceX7k/+QRAbTTvxeQSNSOQpukWM= sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1Ray9eB4DBr+K+/sCJ8= sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3/go.mod h1:18nIHnGi6636UCz6m8i4DhaJ65T6EruyzmoQqI2BVDo= -sigs.k8s.io/kind v0.27.0 h1:PQ3f0iAWNIj66LYkZ1ivhEg/+Zb6UPMbO+qVei/INZA= -sigs.k8s.io/kind v0.27.0/go.mod h1:RZVFmy6qcwlSWwp6xeIUv7kXCPF3i8MXsEXxW/J+gJY= +sigs.k8s.io/kind v0.29.0 h1:3TpCsyh908IkXXpcSnsMjWdwdWjIl7o9IMZImZCWFnI= +sigs.k8s.io/kind v0.29.0/go.mod h1:ldWQisw2NYyM6k64o/tkZng/1qQW7OlzcN5a8geJX3o= +sigs.k8s.io/randfill v0.0.0-20250304075658-069ef1bbf016/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= +sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU= +sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= sigs.k8s.io/structured-merge-diff v0.0.0-20190525122527-15d366b2352e/go.mod h1:wWxsB5ozmmv/SG7nM11ayaAW51xMvak/t1r0CSlcokI= -sigs.k8s.io/structured-merge-diff/v4 v4.4.2 h1:MdmvkGuXi/8io6ixD5wud3vOLwc1rj0aNqRlpuvjmwA= -sigs.k8s.io/structured-merge-diff/v4 v4.4.2/go.mod h1:N8f93tFZh9U6vpxwRArLiikrE5/2tiu1w1AGfACIGE4= +sigs.k8s.io/structured-merge-diff/v4 v4.6.0 h1:IUA9nvMmnKWcj5jl84xn+T5MnlZKThmUW1TdblaLVAc= +sigs.k8s.io/structured-merge-diff/v4 v4.6.0/go.mod h1:dDy58f92j70zLsuZVuUX5Wp9vtxXpaZnkPGWeqDfCps= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= diff --git a/test/infrastructure/net-operator/controllers/networkinterface_controller.go b/test/infrastructure/net-operator/controllers/networkinterface_controller.go index 02089c6645..e3917fef73 100644 --- a/test/infrastructure/net-operator/controllers/networkinterface_controller.go +++ b/test/infrastructure/net-operator/controllers/networkinterface_controller.go @@ -24,7 +24,7 @@ import ( corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" kerrors "k8s.io/apimachinery/pkg/util/errors" - "sigs.k8s.io/cluster-api/util/patch" + "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/patch" "sigs.k8s.io/cluster-api/util/predicates" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" diff --git a/test/infrastructure/net-operator/main.go b/test/infrastructure/net-operator/main.go index 02413372df..6aec2eed22 100644 --- a/test/infrastructure/net-operator/main.go +++ b/test/infrastructure/net-operator/main.go @@ -38,7 +38,7 @@ import ( "k8s.io/component-base/logs" logsv1 "k8s.io/component-base/logs/api/v1" 
"k8s.io/klog/v2" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/cluster-api/controllers/remote" "sigs.k8s.io/cluster-api/feature" "sigs.k8s.io/cluster-api/util/apiwarnings" diff --git a/test/infrastructure/vcsim/config/crd/bases/vcsim.infrastructure.cluster.x-k8s.io_controlplaneendpoints.yaml b/test/infrastructure/vcsim/config/crd/bases/vcsim.infrastructure.cluster.x-k8s.io_controlplaneendpoints.yaml index 4549dbccc4..20a0b7b8e4 100644 --- a/test/infrastructure/vcsim/config/crd/bases/vcsim.infrastructure.cluster.x-k8s.io_controlplaneendpoints.yaml +++ b/test/infrastructure/vcsim/config/crd/bases/vcsim.infrastructure.cluster.x-k8s.io_controlplaneendpoints.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.17.0 + controller-gen.kubebuilder.io/version: v0.18.0 name: controlplaneendpoints.vcsim.infrastructure.cluster.x-k8s.io spec: group: vcsim.infrastructure.cluster.x-k8s.io diff --git a/test/infrastructure/vcsim/config/crd/bases/vcsim.infrastructure.cluster.x-k8s.io_envvars.yaml b/test/infrastructure/vcsim/config/crd/bases/vcsim.infrastructure.cluster.x-k8s.io_envvars.yaml index ded96fd249..b994ddd00a 100644 --- a/test/infrastructure/vcsim/config/crd/bases/vcsim.infrastructure.cluster.x-k8s.io_envvars.yaml +++ b/test/infrastructure/vcsim/config/crd/bases/vcsim.infrastructure.cluster.x-k8s.io_envvars.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.17.0 + controller-gen.kubebuilder.io/version: v0.18.0 name: envvars.vcsim.infrastructure.cluster.x-k8s.io spec: group: vcsim.infrastructure.cluster.x-k8s.io diff --git a/test/infrastructure/vcsim/config/crd/bases/vcsim.infrastructure.cluster.x-k8s.io_vcentersimulators.yaml b/test/infrastructure/vcsim/config/crd/bases/vcsim.infrastructure.cluster.x-k8s.io_vcentersimulators.yaml index 951502c2f2..9f9b75cdea 100644 --- a/test/infrastructure/vcsim/config/crd/bases/vcsim.infrastructure.cluster.x-k8s.io_vcentersimulators.yaml +++ b/test/infrastructure/vcsim/config/crd/bases/vcsim.infrastructure.cluster.x-k8s.io_vcentersimulators.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.17.0 + controller-gen.kubebuilder.io/version: v0.18.0 name: vcentersimulators.vcsim.infrastructure.cluster.x-k8s.io spec: group: vcsim.infrastructure.cluster.x-k8s.io diff --git a/test/infrastructure/vcsim/config/crd/bases/vcsim.infrastructure.cluster.x-k8s.io_vmoperatordependencies.yaml b/test/infrastructure/vcsim/config/crd/bases/vcsim.infrastructure.cluster.x-k8s.io_vmoperatordependencies.yaml index d0982485ef..4d397cd9a7 100644 --- a/test/infrastructure/vcsim/config/crd/bases/vcsim.infrastructure.cluster.x-k8s.io_vmoperatordependencies.yaml +++ b/test/infrastructure/vcsim/config/crd/bases/vcsim.infrastructure.cluster.x-k8s.io_vmoperatordependencies.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.17.0 + controller-gen.kubebuilder.io/version: v0.18.0 name: vmoperatordependencies.vcsim.infrastructure.cluster.x-k8s.io spec: group: vcsim.infrastructure.cluster.x-k8s.io diff --git a/test/infrastructure/vcsim/controllers/controlplaneendpoint_controller.go 
b/test/infrastructure/vcsim/controllers/controlplaneendpoint_controller.go index 934e5ea736..75fc739827 100644 --- a/test/infrastructure/vcsim/controllers/controlplaneendpoint_controller.go +++ b/test/infrastructure/vcsim/controllers/controlplaneendpoint_controller.go @@ -25,8 +25,8 @@ import ( "k8s.io/klog/v2" inmemoryruntime "sigs.k8s.io/cluster-api/test/infrastructure/inmemory/pkg/runtime" inmemoryserver "sigs.k8s.io/cluster-api/test/infrastructure/inmemory/pkg/server" + "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/patch" "sigs.k8s.io/cluster-api/util/finalizers" - "sigs.k8s.io/cluster-api/util/patch" "sigs.k8s.io/cluster-api/util/predicates" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" diff --git a/test/infrastructure/vcsim/controllers/envvar_controller.go b/test/infrastructure/vcsim/controllers/envvar_controller.go index 8891602bec..d0318841f0 100644 --- a/test/infrastructure/vcsim/controllers/envvar_controller.go +++ b/test/infrastructure/vcsim/controllers/envvar_controller.go @@ -33,7 +33,7 @@ import ( kerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/klog/v2" "k8s.io/utils/ptr" - "sigs.k8s.io/cluster-api/util/patch" + "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/patch" "sigs.k8s.io/cluster-api/util/predicates" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" diff --git a/test/infrastructure/vcsim/controllers/vcsim_controller.go b/test/infrastructure/vcsim/controllers/vcsim_controller.go index fea2ddb4ae..9e6ee8bad5 100644 --- a/test/infrastructure/vcsim/controllers/vcsim_controller.go +++ b/test/infrastructure/vcsim/controllers/vcsim_controller.go @@ -44,8 +44,8 @@ import ( kerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/klog/v2" "k8s.io/utils/ptr" + "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/patch" "sigs.k8s.io/cluster-api/util/finalizers" - "sigs.k8s.io/cluster-api/util/patch" "sigs.k8s.io/cluster-api/util/predicates" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" diff --git a/test/infrastructure/vcsim/controllers/virtualmachine_controller.go b/test/infrastructure/vcsim/controllers/virtualmachine_controller.go index e0dbcb38d7..a2e4ff0ba0 100644 --- a/test/infrastructure/vcsim/controllers/virtualmachine_controller.go +++ b/test/infrastructure/vcsim/controllers/virtualmachine_controller.go @@ -28,13 +28,12 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" kerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/klog/v2" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" inmemoryruntime "sigs.k8s.io/cluster-api/test/infrastructure/inmemory/pkg/runtime" inmemoryserver "sigs.k8s.io/cluster-api/test/infrastructure/inmemory/pkg/server" - capiutil "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/annotations" + "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/patch" "sigs.k8s.io/cluster-api/util/finalizers" - "sigs.k8s.io/cluster-api/util/patch" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller" @@ -98,7 +97,7 @@ func (r *VirtualMachineReconciler) Reconcile(ctx context.Context, req ctrl.Reque ctx = ctrl.LoggerInto(ctx, log) // Fetch the Machine. 
- machine, err := capiutil.GetOwnerMachine(ctx, r.Client, vSphereMachine.ObjectMeta) + machine, err := getOwnerMachineV1Beta1(ctx, r.Client, vSphereMachine.ObjectMeta) if err != nil { return ctrl.Result{}, err } @@ -110,20 +109,20 @@ func (r *VirtualMachineReconciler) Reconcile(ctx context.Context, req ctrl.Reque ctx = ctrl.LoggerInto(ctx, log) // Fetch the Cluster. - cluster, err := capiutil.GetClusterFromMetadata(ctx, r.Client, machine.ObjectMeta) + cluster, err := getClusterV1Beta1FromMetadata(ctx, r.Client, machine.ObjectMeta) if err != nil { log.Info("VSphereMachine owner Machine is missing cluster label or cluster does not exist") return ctrl.Result{}, err } if cluster == nil { - log.Info(fmt.Sprintf("Please associate this machine with a cluster using the label %s: ", clusterv1.ClusterNameLabel)) + log.Info(fmt.Sprintf("Please associate this machine with a cluster using the label %s: ", clusterv1beta1.ClusterNameLabel)) return ctrl.Result{}, nil } log = log.WithValues("Cluster", klog.KObj(cluster)) ctx = ctrl.LoggerInto(ctx, log) // Return early if the object or Cluster is paused. - if annotations.IsPaused(cluster, virtualMachine) { + if cluster.Spec.Paused || annotations.HasPaused(virtualMachine) { log.Info("Reconciliation is paused for this object") return ctrl.Result{}, nil } @@ -246,7 +245,7 @@ func (r *VirtualMachineReconciler) Reconcile(ctx context.Context, req ctrl.Reque return r.reconcileNormal(ctx, cluster, machine, virtualMachine, conditionsTracker) } -func (r *VirtualMachineReconciler) reconcileNormal(ctx context.Context, cluster *clusterv1.Cluster, machine *clusterv1.Machine, virtualMachine *vmoprv1.VirtualMachine, conditionsTracker *infrav1.VSphereVM) (ctrl.Result, error) { +func (r *VirtualMachineReconciler) reconcileNormal(ctx context.Context, cluster *clusterv1beta1.Cluster, machine *clusterv1beta1.Machine, virtualMachine *vmoprv1.VirtualMachine, conditionsTracker *infrav1.VSphereVM) (ctrl.Result, error) { ipReconciler := r.getVMIpReconciler(cluster, virtualMachine) if ret, err := ipReconciler.ReconcileIP(ctx); !ret.IsZero() || err != nil { return ret, err @@ -260,7 +259,7 @@ func (r *VirtualMachineReconciler) reconcileNormal(ctx context.Context, cluster return ctrl.Result{}, nil } -func (r *VirtualMachineReconciler) reconcileDelete(ctx context.Context, cluster *clusterv1.Cluster, machine *clusterv1.Machine, virtualMachine *vmoprv1.VirtualMachine, conditionsTracker *infrav1.VSphereVM) (ctrl.Result, error) { +func (r *VirtualMachineReconciler) reconcileDelete(ctx context.Context, cluster *clusterv1beta1.Cluster, machine *clusterv1beta1.Machine, virtualMachine *vmoprv1.VirtualMachine, conditionsTracker *infrav1.VSphereVM) (ctrl.Result, error) { bootstrapReconciler := r.getVMBootstrapReconciler(virtualMachine) if ret, err := bootstrapReconciler.reconcileDelete(ctx, cluster, machine, conditionsTracker); !ret.IsZero() || err != nil { return ret, err @@ -270,7 +269,7 @@ func (r *VirtualMachineReconciler) reconcileDelete(ctx context.Context, cluster return ctrl.Result{}, nil } -func (r *VirtualMachineReconciler) getVMIpReconciler(cluster *clusterv1.Cluster, virtualMachine *vmoprv1.VirtualMachine) *vmIPReconciler { +func (r *VirtualMachineReconciler) getVMIpReconciler(cluster *clusterv1beta1.Cluster, virtualMachine *vmoprv1.VirtualMachine) *vmIPReconciler { return &vmIPReconciler{ Client: r.Client, diff --git a/test/infrastructure/vcsim/controllers/virtualmachine_controller_test.go b/test/infrastructure/vcsim/controllers/virtualmachine_controller_test.go index 
b973f21fd3..96121e4793 100644 --- a/test/infrastructure/vcsim/controllers/virtualmachine_controller_test.go +++ b/test/infrastructure/vcsim/controllers/virtualmachine_controller_test.go @@ -27,10 +27,11 @@ import ( "k8s.io/apimachinery/pkg/types" "k8s.io/klog/v2" "k8s.io/utils/ptr" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" inmemoryruntime "sigs.k8s.io/cluster-api/test/infrastructure/inmemory/pkg/runtime" inmemoryserver "sigs.k8s.io/cluster-api/test/infrastructure/inmemory/pkg/server" - "sigs.k8s.io/cluster-api/util/conditions" + deprecatedconditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" @@ -159,9 +160,9 @@ func Test_Reconcile_VirtualMachine(t *testing.T) { err = inmemoryClient.Get(ctx, client.ObjectKeyFromObject(virtualMachine), conditionsTracker) g.Expect(err).ToNot(HaveOccurred()) - c := conditions.Get(conditionsTracker, VMProvisionedCondition) + c := deprecatedconditions.Get(conditionsTracker, VMProvisionedCondition) g.Expect(c.Status).To(Equal(corev1.ConditionFalse)) - g.Expect(c.Severity).To(Equal(clusterv1.ConditionSeverityInfo)) + g.Expect(c.Severity).To(Equal(clusterv1beta1.ConditionSeverityInfo)) g.Expect(c.Reason).To(Equal(WaitingControlPlaneInitializedReason)) }) @@ -299,7 +300,7 @@ func Test_Reconcile_VirtualMachine(t *testing.T) { err = inmemoryClient.Get(ctx, client.ObjectKeyFromObject(virtualMachine), conditionsTracker) g.Expect(err).ToNot(HaveOccurred()) - c := conditions.Get(conditionsTracker, NodeProvisionedCondition) + c := deprecatedconditions.Get(conditionsTracker, NodeProvisionedCondition) g.Expect(c.Status).To(Equal(corev1.ConditionTrue)) }) } diff --git a/test/infrastructure/vcsim/controllers/vmbootstrap_controller.go b/test/infrastructure/vcsim/controllers/vmbootstrap_controller.go index e1298ccfc9..bed7f0f41c 100644 --- a/test/infrastructure/vcsim/controllers/vmbootstrap_controller.go +++ b/test/infrastructure/vcsim/controllers/vmbootstrap_controller.go @@ -32,12 +32,12 @@ import ( kerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/klog/v2" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" inmemoryruntime "sigs.k8s.io/cluster-api/test/infrastructure/inmemory/pkg/runtime" inmemoryserver "sigs.k8s.io/cluster-api/test/infrastructure/inmemory/pkg/server" capiutil "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/certs" - "sigs.k8s.io/cluster-api/util/conditions" + deprecatedconditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" "sigs.k8s.io/cluster-api/util/secret" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" @@ -53,7 +53,7 @@ import ( const ( // VMProvisionedCondition documents the status of VM provisioning, // which includes the VM being provisioned and with a boostrap secret available. - VMProvisionedCondition clusterv1.ConditionType = "VMProvisioned" + VMProvisionedCondition clusterv1beta1.ConditionType = "VMProvisioned" // WaitingForVMInfrastructureReason (Severity=Info) documents provisioning waiting for the VM // infrastructure to be ready. @@ -75,7 +75,7 @@ var ( // TODO: make this configurable const ( // NodeProvisionedCondition documents the status of the provisioning of the Kubernetes node. 
- NodeProvisionedCondition clusterv1.ConditionType = "NodeProvisioned" + NodeProvisionedCondition clusterv1beta1.ConditionType = "NodeProvisioned" // NodeWaitingForStartupTimeoutReason (Severity=Info) documents the Kubernetes Node provisioning. NodeWaitingForStartupTimeoutReason = "WaitingForStartupTimeout" @@ -88,7 +88,7 @@ var ( // TODO: make this configurable const ( // EtcdProvisionedCondition documents the status of the provisioning of the etcd member. - EtcdProvisionedCondition clusterv1.ConditionType = "EtcdProvisioned" + EtcdProvisionedCondition clusterv1beta1.ConditionType = "EtcdProvisioned" // EtcdWaitingForStartupTimeoutReason (Severity=Info) documents the etcd pod provisioning. EtcdWaitingForStartupTimeoutReason = "WaitingForStartupTimeout" @@ -101,7 +101,7 @@ var ( // TODO: make this configurable const ( // APIServerProvisionedCondition documents the status of the provisioning of the APIServer instance. - APIServerProvisionedCondition clusterv1.ConditionType = "APIServerProvisioned" + APIServerProvisionedCondition clusterv1beta1.ConditionType = "APIServerProvisioned" // APIServerWaitingForStartupTimeoutReason (Severity=Info) documents the API server pod provisioning. APIServerWaitingForStartupTimeoutReason = "WaitingForStartupTimeout" @@ -132,8 +132,8 @@ const ( type ConditionsTracker interface { client.Object - conditions.Getter - conditions.Setter + deprecatedconditions.Getter + deprecatedconditions.Setter } type vmBootstrapReconciler struct { @@ -145,23 +145,23 @@ type vmBootstrapReconciler struct { GetProviderID func() string } -func (r *vmBootstrapReconciler) reconcileBoostrap(ctx context.Context, cluster *clusterv1.Cluster, machine *clusterv1.Machine, conditionsTracker ConditionsTracker) (ctrl.Result, error) { +func (r *vmBootstrapReconciler) reconcileBoostrap(ctx context.Context, cluster *clusterv1beta1.Cluster, machine *clusterv1beta1.Machine, conditionsTracker ConditionsTracker) (ctrl.Result, error) { log := ctrl.LoggerFrom(ctx) - if !conditions.Has(conditionsTracker, VMProvisionedCondition) { - conditions.MarkFalse(conditionsTracker, VMProvisionedCondition, WaitingForVMInfrastructureReason, clusterv1.ConditionSeverityInfo, "") + if !deprecatedconditions.Has(conditionsTracker, VMProvisionedCondition) { + deprecatedconditions.MarkFalse(conditionsTracker, VMProvisionedCondition, WaitingForVMInfrastructureReason, clusterv1beta1.ConditionSeverityInfo, "") } // Make sure bootstrap data is available and populated. // NOTE: we are not using bootstrap data, but we wait for it in order to simulate a real machine provisioning workflow. 
if machine.Spec.Bootstrap.DataSecretName == nil { - if !util.IsControlPlaneMachine(machine) && !conditions.IsTrue(cluster, clusterv1.ControlPlaneInitializedCondition) { - conditions.MarkFalse(conditionsTracker, VMProvisionedCondition, WaitingControlPlaneInitializedReason, clusterv1.ConditionSeverityInfo, "") + if !util.IsControlPlaneMachine(machine) && !deprecatedconditions.IsTrue(cluster, clusterv1beta1.ControlPlaneInitializedCondition) { + deprecatedconditions.MarkFalse(conditionsTracker, VMProvisionedCondition, WaitingControlPlaneInitializedReason, clusterv1beta1.ConditionSeverityInfo, "") log.Info("Waiting for the control plane to be initialized") return reconcile.Result{RequeueAfter: 5 * time.Second}, nil // keep requeueing since we don't have a watch on machines // TODO: check if we can avoid this } - conditions.MarkFalse(conditionsTracker, VMProvisionedCondition, WaitingForBootstrapDataReason, clusterv1.ConditionSeverityInfo, "") + deprecatedconditions.MarkFalse(conditionsTracker, VMProvisionedCondition, WaitingForBootstrapDataReason, clusterv1beta1.ConditionSeverityInfo, "") log.Info("Waiting for the Bootstrap provider controller to set bootstrap data") return reconcile.Result{RequeueAfter: 5 * time.Second}, nil // keep requeueing since we don't have a watch on machines // TODO: check if we can avoid this } @@ -171,12 +171,12 @@ func (r *vmBootstrapReconciler) reconcileBoostrap(ctx context.Context, cluster * log.Info("Waiting for machine infrastructure to become ready") return reconcile.Result{}, nil // TODO: check if we can avoid this } - if !conditions.IsTrue(conditionsTracker, VMProvisionedCondition) { - conditions.MarkTrue(conditionsTracker, VMProvisionedCondition) + if !deprecatedconditions.IsTrue(conditionsTracker, VMProvisionedCondition) { + deprecatedconditions.MarkTrue(conditionsTracker, VMProvisionedCondition) } // Call the inner reconciliation methods. - phases := []func(ctx context.Context, cluster *clusterv1.Cluster, machine *clusterv1.Machine, conditionsTracker ConditionsTracker) (ctrl.Result, error){ + phases := []func(ctx context.Context, cluster *clusterv1beta1.Cluster, machine *clusterv1beta1.Machine, conditionsTracker ConditionsTracker) (ctrl.Result, error){ r.reconcileBoostrapNode, r.reconcileBoostrapETCD, r.reconcileBoostrapAPIServer, @@ -202,17 +202,17 @@ func (r *vmBootstrapReconciler) reconcileBoostrap(ctx context.Context, cluster * return res, kerrors.NewAggregate(errs) } -func (r *vmBootstrapReconciler) reconcileBoostrapNode(ctx context.Context, cluster *clusterv1.Cluster, machine *clusterv1.Machine, conditionsTracker ConditionsTracker) (ctrl.Result, error) { +func (r *vmBootstrapReconciler) reconcileBoostrapNode(ctx context.Context, cluster *clusterv1beta1.Cluster, machine *clusterv1beta1.Machine, conditionsTracker ConditionsTracker) (ctrl.Result, error) { log := ctrl.LoggerFrom(ctx) nodeName := conditionsTracker.GetName() provisioningDuration := nodeStartupDuration provisioningDuration += time.Duration(rand.Float64() * nodeStartupJitter * float64(provisioningDuration)) //nolint:gosec // Intentionally using a weak random number generator here. 
- start := conditions.Get(conditionsTracker, VMProvisionedCondition).LastTransitionTime + start := deprecatedconditions.Get(conditionsTracker, VMProvisionedCondition).LastTransitionTime now := time.Now() if now.Before(start.Add(provisioningDuration)) { - conditions.MarkFalse(conditionsTracker, NodeProvisionedCondition, NodeWaitingForStartupTimeoutReason, clusterv1.ConditionSeverityInfo, "") + deprecatedconditions.MarkFalse(conditionsTracker, NodeProvisionedCondition, NodeWaitingForStartupTimeoutReason, clusterv1beta1.ConditionSeverityInfo, "") remainingTime := start.Add(provisioningDuration).Sub(now) log.Info("Waiting for Node to start", "Start", start, "Duration", provisioningDuration, "RemainingTime", remainingTime, "Node", nodeName) return ctrl.Result{RequeueAfter: remainingTime}, nil @@ -283,11 +283,11 @@ func (r *vmBootstrapReconciler) reconcileBoostrapNode(ctx context.Context, clust log.Info("Node created", "Node", klog.KObj(node)) } - conditions.MarkTrue(conditionsTracker, NodeProvisionedCondition) + deprecatedconditions.MarkTrue(conditionsTracker, NodeProvisionedCondition) return ctrl.Result{}, nil } -func (r *vmBootstrapReconciler) reconcileBoostrapETCD(ctx context.Context, cluster *clusterv1.Cluster, machine *clusterv1.Machine, conditionsTracker ConditionsTracker) (ctrl.Result, error) { +func (r *vmBootstrapReconciler) reconcileBoostrapETCD(ctx context.Context, cluster *clusterv1beta1.Cluster, machine *clusterv1beta1.Machine, conditionsTracker ConditionsTracker) (ctrl.Result, error) { log := ctrl.LoggerFrom(ctx) etcdMember := fmt.Sprintf("etcd-%s", conditionsTracker.GetName()) @@ -297,7 +297,7 @@ func (r *vmBootstrapReconciler) reconcileBoostrapETCD(ctx context.Context, clust } // No-op if the Node is not provisioned yet - if !conditions.IsTrue(conditionsTracker, NodeProvisionedCondition) { + if !deprecatedconditions.IsTrue(conditionsTracker, NodeProvisionedCondition) { return ctrl.Result{}, nil } @@ -305,10 +305,10 @@ func (r *vmBootstrapReconciler) reconcileBoostrapETCD(ctx context.Context, clust provisioningDuration := etcdStartupDuration provisioningDuration += time.Duration(rand.Float64() * etcdStartupJitter * float64(provisioningDuration)) //nolint:gosec // Intentionally using a weak random number generator here. 
- start := conditions.Get(conditionsTracker, NodeProvisionedCondition).LastTransitionTime + start := deprecatedconditions.Get(conditionsTracker, NodeProvisionedCondition).LastTransitionTime now := time.Now() if now.Before(start.Add(provisioningDuration)) { - conditions.MarkFalse(conditionsTracker, EtcdProvisionedCondition, EtcdWaitingForStartupTimeoutReason, clusterv1.ConditionSeverityInfo, "") + deprecatedconditions.MarkFalse(conditionsTracker, EtcdProvisionedCondition, EtcdWaitingForStartupTimeoutReason, clusterv1beta1.ConditionSeverityInfo, "") remainingTime := start.Add(provisioningDuration).Sub(now) log.Info("Waiting for etcd Pod to start", "Start", start, "Duration", provisioningDuration, "RemainingTime", remainingTime, "Pod", klog.KRef(metav1.NamespaceSystem, etcdMember)) return ctrl.Result{RequeueAfter: remainingTime}, nil @@ -427,11 +427,11 @@ func (r *vmBootstrapReconciler) reconcileBoostrapETCD(ctx context.Context, clust log.Info("etcd Pod started", "Pod", klog.KObj(etcdPod)) } - conditions.MarkTrue(conditionsTracker, EtcdProvisionedCondition) + deprecatedconditions.MarkTrue(conditionsTracker, EtcdProvisionedCondition) return ctrl.Result{}, nil } -func (r *vmBootstrapReconciler) reconcileBoostrapAPIServer(ctx context.Context, cluster *clusterv1.Cluster, machine *clusterv1.Machine, conditionsTracker ConditionsTracker) (ctrl.Result, error) { +func (r *vmBootstrapReconciler) reconcileBoostrapAPIServer(ctx context.Context, cluster *clusterv1beta1.Cluster, machine *clusterv1beta1.Machine, conditionsTracker ConditionsTracker) (ctrl.Result, error) { log := ctrl.LoggerFrom(ctx) apiServer := fmt.Sprintf("kube-apiserver-%s", conditionsTracker.GetName()) @@ -441,7 +441,7 @@ func (r *vmBootstrapReconciler) reconcileBoostrapAPIServer(ctx context.Context, } // No-op if the Node is not provisioned yet - if !conditions.IsTrue(conditionsTracker, NodeProvisionedCondition) { + if !deprecatedconditions.IsTrue(conditionsTracker, NodeProvisionedCondition) { return ctrl.Result{}, nil } @@ -449,10 +449,10 @@ func (r *vmBootstrapReconciler) reconcileBoostrapAPIServer(ctx context.Context, provisioningDuration := apiServerStartupDuration provisioningDuration += time.Duration(rand.Float64() * apiServerStartupJitter * float64(provisioningDuration)) //nolint:gosec // Intentionally using a weak random number generator here. 
- start := conditions.Get(conditionsTracker, NodeProvisionedCondition).LastTransitionTime + start := deprecatedconditions.Get(conditionsTracker, NodeProvisionedCondition).LastTransitionTime now := time.Now() if now.Before(start.Add(provisioningDuration)) { - conditions.MarkFalse(conditionsTracker, APIServerProvisionedCondition, APIServerWaitingForStartupTimeoutReason, clusterv1.ConditionSeverityInfo, "") + deprecatedconditions.MarkFalse(conditionsTracker, APIServerProvisionedCondition, APIServerWaitingForStartupTimeoutReason, clusterv1beta1.ConditionSeverityInfo, "") remainingTime := start.Add(provisioningDuration).Sub(now) log.Info("Waiting for API server Pod to start", "Start", start, "Duration", provisioningDuration, "RemainingTime", remainingTime, "Pod", klog.KRef(metav1.NamespaceSystem, apiServer)) return ctrl.Result{RequeueAfter: remainingTime}, nil @@ -536,11 +536,11 @@ func (r *vmBootstrapReconciler) reconcileBoostrapAPIServer(ctx context.Context, log.Info("API server Pod started", "Pod", klog.KObj(apiServerPod)) } - conditions.MarkTrue(conditionsTracker, APIServerProvisionedCondition) + deprecatedconditions.MarkTrue(conditionsTracker, APIServerProvisionedCondition) return ctrl.Result{}, nil } -func (r *vmBootstrapReconciler) reconcileBoostrapScheduler(ctx context.Context, cluster *clusterv1.Cluster, machine *clusterv1.Machine, conditionsTracker ConditionsTracker) (ctrl.Result, error) { +func (r *vmBootstrapReconciler) reconcileBoostrapScheduler(ctx context.Context, cluster *clusterv1beta1.Cluster, machine *clusterv1beta1.Machine, conditionsTracker ConditionsTracker) (ctrl.Result, error) { // No-op if the machine is not a control plane machine. if !util.IsControlPlaneMachine(machine) { return ctrl.Result{}, nil @@ -550,7 +550,7 @@ func (r *vmBootstrapReconciler) reconcileBoostrapScheduler(ctx context.Context, // specific behaviour for this component because they are not relevant for stress tests. // As a current approximation, we create the scheduler as soon as the API server is provisioned; // also, the scheduler is immediately marked as ready. - if !conditions.IsTrue(conditionsTracker, APIServerProvisionedCondition) { + if !deprecatedconditions.IsTrue(conditionsTracker, APIServerProvisionedCondition) { return ctrl.Result{}, nil } @@ -587,7 +587,7 @@ func (r *vmBootstrapReconciler) reconcileBoostrapScheduler(ctx context.Context, return ctrl.Result{}, nil } -func (r *vmBootstrapReconciler) reconcileBoostrapControllerManager(ctx context.Context, cluster *clusterv1.Cluster, machine *clusterv1.Machine, conditionsTracker ConditionsTracker) (ctrl.Result, error) { +func (r *vmBootstrapReconciler) reconcileBoostrapControllerManager(ctx context.Context, cluster *clusterv1beta1.Cluster, machine *clusterv1beta1.Machine, conditionsTracker ConditionsTracker) (ctrl.Result, error) { // No-op if the machine is not a control plane machine. if !util.IsControlPlaneMachine(machine) { return ctrl.Result{}, nil @@ -597,7 +597,7 @@ func (r *vmBootstrapReconciler) reconcileBoostrapControllerManager(ctx context.C // specific behaviour for this component because they are not relevant for stress tests. // As a current approximation, we create the controller manager as soon as the API server is provisioned; // also, the controller manager is immediately marked as ready. 
- if !conditions.IsTrue(conditionsTracker, APIServerProvisionedCondition) { + if !deprecatedconditions.IsTrue(conditionsTracker, APIServerProvisionedCondition) { return ctrl.Result{}, nil } @@ -634,7 +634,7 @@ func (r *vmBootstrapReconciler) reconcileBoostrapControllerManager(ctx context.C return ctrl.Result{}, nil } -func (r *vmBootstrapReconciler) reconcileBoostrapKubeadmObjects(ctx context.Context, cluster *clusterv1.Cluster, machine *clusterv1.Machine, _ ConditionsTracker) (ctrl.Result, error) { +func (r *vmBootstrapReconciler) reconcileBoostrapKubeadmObjects(ctx context.Context, cluster *clusterv1beta1.Cluster, machine *clusterv1beta1.Machine, _ ConditionsTracker) (ctrl.Result, error) { // No-op if the machine is not a control plane machine. if !util.IsControlPlaneMachine(machine) { return ctrl.Result{}, nil @@ -701,7 +701,7 @@ func (r *vmBootstrapReconciler) reconcileBoostrapKubeadmObjects(ctx context.Cont return ctrl.Result{}, nil } -func (r *vmBootstrapReconciler) reconcileBoostrapKubeProxy(ctx context.Context, cluster *clusterv1.Cluster, machine *clusterv1.Machine, _ ConditionsTracker) (ctrl.Result, error) { +func (r *vmBootstrapReconciler) reconcileBoostrapKubeProxy(ctx context.Context, cluster *clusterv1beta1.Cluster, machine *clusterv1beta1.Machine, _ ConditionsTracker) (ctrl.Result, error) { // No-op if the machine is not a control plane machine. if !util.IsControlPlaneMachine(machine) { return ctrl.Result{}, nil @@ -747,7 +747,7 @@ func (r *vmBootstrapReconciler) reconcileBoostrapKubeProxy(ctx context.Context, return ctrl.Result{}, nil } -func (r *vmBootstrapReconciler) reconcileBoostrapCoredns(ctx context.Context, cluster *clusterv1.Cluster, machine *clusterv1.Machine, _ ConditionsTracker) (ctrl.Result, error) { +func (r *vmBootstrapReconciler) reconcileBoostrapCoredns(ctx context.Context, cluster *clusterv1beta1.Cluster, machine *clusterv1beta1.Machine, _ ConditionsTracker) (ctrl.Result, error) { // No-op if the machine is not a control plane machine. if !util.IsControlPlaneMachine(machine) { return ctrl.Result{}, nil @@ -810,9 +810,9 @@ func (r *vmBootstrapReconciler) reconcileBoostrapCoredns(ctx context.Context, cl return ctrl.Result{}, nil } -func (r *vmBootstrapReconciler) reconcileDelete(ctx context.Context, cluster *clusterv1.Cluster, machine *clusterv1.Machine, conditionsTracker ConditionsTracker) (ctrl.Result, error) { +func (r *vmBootstrapReconciler) reconcileDelete(ctx context.Context, cluster *clusterv1beta1.Cluster, machine *clusterv1beta1.Machine, conditionsTracker ConditionsTracker) (ctrl.Result, error) { // Call the inner reconciliation methods. 
- phases := []func(ctx context.Context, cluster *clusterv1.Cluster, machine *clusterv1.Machine, conditionsTracker ConditionsTracker) (ctrl.Result, error){ + phases := []func(ctx context.Context, cluster *clusterv1beta1.Cluster, machine *clusterv1beta1.Machine, conditionsTracker ConditionsTracker) (ctrl.Result, error){ r.reconcileDeleteNode, r.reconcileDeleteETCD, r.reconcileDeleteAPIServer, @@ -839,7 +839,7 @@ func (r *vmBootstrapReconciler) reconcileDelete(ctx context.Context, cluster *cl return res, kerrors.NewAggregate(errs) } -func (r *vmBootstrapReconciler) reconcileDeleteNode(ctx context.Context, cluster *clusterv1.Cluster, _ *clusterv1.Machine, conditionsTracker ConditionsTracker) (ctrl.Result, error) { +func (r *vmBootstrapReconciler) reconcileDeleteNode(ctx context.Context, cluster *clusterv1beta1.Cluster, _ *clusterv1beta1.Machine, conditionsTracker ConditionsTracker) (ctrl.Result, error) { // Compute the resource group unique name. resourceGroup := klog.KObj(cluster).String() inmemoryClient := r.InMemoryManager.GetResourceGroup(resourceGroup).GetClient() @@ -859,7 +859,7 @@ func (r *vmBootstrapReconciler) reconcileDeleteNode(ctx context.Context, cluster return ctrl.Result{}, nil } -func (r *vmBootstrapReconciler) reconcileDeleteETCD(ctx context.Context, cluster *clusterv1.Cluster, machine *clusterv1.Machine, conditionsTracker ConditionsTracker) (ctrl.Result, error) { +func (r *vmBootstrapReconciler) reconcileDeleteETCD(ctx context.Context, cluster *clusterv1beta1.Cluster, machine *clusterv1beta1.Machine, conditionsTracker ConditionsTracker) (ctrl.Result, error) { // No-op if the machine is not a control plane machine. if !util.IsControlPlaneMachine(machine) { return ctrl.Result{}, nil @@ -896,7 +896,7 @@ func (r *vmBootstrapReconciler) reconcileDeleteETCD(ctx context.Context, cluster return ctrl.Result{}, nil } -func (r *vmBootstrapReconciler) reconcileDeleteAPIServer(ctx context.Context, cluster *clusterv1.Cluster, machine *clusterv1.Machine, conditionsTracker ConditionsTracker) (ctrl.Result, error) { +func (r *vmBootstrapReconciler) reconcileDeleteAPIServer(ctx context.Context, cluster *clusterv1beta1.Cluster, machine *clusterv1beta1.Machine, conditionsTracker ConditionsTracker) (ctrl.Result, error) { // No-op if the machine is not a control plane machine. if !util.IsControlPlaneMachine(machine) { return ctrl.Result{}, nil @@ -928,7 +928,7 @@ func (r *vmBootstrapReconciler) reconcileDeleteAPIServer(ctx context.Context, cl return ctrl.Result{}, nil } -func (r *vmBootstrapReconciler) reconcileDeleteScheduler(ctx context.Context, cluster *clusterv1.Cluster, machine *clusterv1.Machine, conditionsTracker ConditionsTracker) (ctrl.Result, error) { +func (r *vmBootstrapReconciler) reconcileDeleteScheduler(ctx context.Context, cluster *clusterv1beta1.Cluster, machine *clusterv1beta1.Machine, conditionsTracker ConditionsTracker) (ctrl.Result, error) { // No-op if the machine is not a control plane machine. 
diff --git a/test/infrastructure/vcsim/controllers/vmoperatordependencies_controller.go b/test/infrastructure/vcsim/controllers/vmoperatordependencies_controller.go
index 45aee29388..0ca2acb1c8 100644
--- a/test/infrastructure/vcsim/controllers/vmoperatordependencies_controller.go
+++ b/test/infrastructure/vcsim/controllers/vmoperatordependencies_controller.go
@@ -22,7 +22,7 @@ import (
 	"github.com/pkg/errors"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	kerrors "k8s.io/apimachinery/pkg/util/errors"
-	"sigs.k8s.io/cluster-api/util/patch"
+	"sigs.k8s.io/cluster-api/util/deprecated/v1beta1/patch"
 	"sigs.k8s.io/cluster-api/util/predicates"
 	ctrl "sigs.k8s.io/controller-runtime"
 	"sigs.k8s.io/controller-runtime/pkg/client"
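The only change to this controller is the switch to the deprecated v1beta1 patch helper. A minimal sketch of how that helper is typically used, assuming the deprecated copy preserves the NewHelper/Patch API of the former sigs.k8s.io/cluster-api/util/patch (the function below is illustrative and not part of the patch):

package example

import (
	"context"

	"sigs.k8s.io/cluster-api/util/deprecated/v1beta1/patch"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// patchObject snapshots the object, lets the caller mutate it, and then issues
// a patch computed from the snapshot against the API server.
func patchObject(ctx context.Context, c client.Client, obj client.Object, mutate func()) error {
	helper, err := patch.NewHelper(obj, c)
	if err != nil {
		return err
	}
	mutate()
	return helper.Patch(ctx, obj)
}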
diff --git a/test/infrastructure/vcsim/controllers/vspherevm_controller.go b/test/infrastructure/vcsim/controllers/vspherevm_controller.go
index 6ea4b0ead5..7b6257841d 100644
--- a/test/infrastructure/vcsim/controllers/vspherevm_controller.go
+++ b/test/infrastructure/vcsim/controllers/vspherevm_controller.go
@@ -24,16 +24,17 @@ import (
 	corev1 "k8s.io/api/core/v1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime/schema"
 	kerrors "k8s.io/apimachinery/pkg/util/errors"
 	"k8s.io/klog/v2"
-	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
+	clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
 	inmemoryruntime "sigs.k8s.io/cluster-api/test/infrastructure/inmemory/pkg/runtime"
 	inmemoryserver "sigs.k8s.io/cluster-api/test/infrastructure/inmemory/pkg/server"
 	capiutil "sigs.k8s.io/cluster-api/util"
 	"sigs.k8s.io/cluster-api/util/annotations"
-	"sigs.k8s.io/cluster-api/util/conditions"
+	deprecatedconditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions"
+	"sigs.k8s.io/cluster-api/util/deprecated/v1beta1/patch"
 	"sigs.k8s.io/cluster-api/util/finalizers"
-	"sigs.k8s.io/cluster-api/util/patch"
 	"sigs.k8s.io/cluster-api/util/predicates"
 	ctrl "sigs.k8s.io/controller-runtime"
 	"sigs.k8s.io/controller-runtime/pkg/client"
@@ -100,7 +101,7 @@ func (r *VSphereVMReconciler) Reconcile(ctx context.Context, req ctrl.Request) (
 	ctx = ctrl.LoggerInto(ctx, log)
 
 	// Fetch the Machine.
-	machine, err := capiutil.GetOwnerMachine(ctx, r.Client, vSphereMachine.ObjectMeta)
+	machine, err := getOwnerMachineV1Beta1(ctx, r.Client, vSphereMachine.ObjectMeta)
 	if err != nil {
 		return ctrl.Result{}, err
 	}
@@ -112,20 +113,20 @@ func (r *VSphereVMReconciler) Reconcile(ctx context.Context, req ctrl.Request) (
 	ctx = ctrl.LoggerInto(ctx, log)
 
 	// Fetch the Cluster.
-	cluster, err := capiutil.GetClusterFromMetadata(ctx, r.Client, machine.ObjectMeta)
+	cluster, err := getClusterV1Beta1FromMetadata(ctx, r.Client, machine.ObjectMeta)
 	if err != nil {
 		log.Info("VSphereVM owner Machine is missing cluster label or cluster does not exist")
 		return ctrl.Result{}, err
 	}
 	if cluster == nil {
-		log.Info(fmt.Sprintf("Please associate this machine with a cluster using the label %s: ", clusterv1.ClusterNameLabel))
+		log.Info(fmt.Sprintf("Please associate this machine with a cluster using the label %s: ", clusterv1beta1.ClusterNameLabel))
 		return ctrl.Result{}, nil
 	}
 	log = log.WithValues("Cluster", klog.KObj(cluster))
 	ctx = ctrl.LoggerInto(ctx, log)
 
 	// Return early if the object or Cluster is paused.
-	if annotations.IsPaused(cluster, vSphereVM) {
+	if cluster.Spec.Paused || annotations.HasPaused(vSphereVM) {
 		log.Info("Reconciliation is paused for this object")
 		return ctrl.Result{}, nil
 	}
@@ -247,7 +248,7 @@ func (r *VSphereVMReconciler) Reconcile(ctx context.Context, req ctrl.Request) (
 	return r.reconcileNormal(ctx, cluster, vSphereCluster, machine, vSphereVM, conditionsTracker)
 }
 
-func (r *VSphereVMReconciler) reconcileNormal(ctx context.Context, cluster *clusterv1.Cluster, vSphereCluster *infrav1.VSphereCluster, machine *clusterv1.Machine, vSphereVM *infrav1.VSphereVM, conditionsTracker *infrav1.VSphereVM) (ctrl.Result, error) {
+func (r *VSphereVMReconciler) reconcileNormal(ctx context.Context, cluster *clusterv1beta1.Cluster, vSphereCluster *infrav1.VSphereCluster, machine *clusterv1beta1.Machine, vSphereVM *infrav1.VSphereVM, conditionsTracker *infrav1.VSphereVM) (ctrl.Result, error) {
 	ipReconciler := r.getVMIpReconciler(vSphereCluster, vSphereVM)
 	if ret, err := ipReconciler.ReconcileIP(ctx); !ret.IsZero() || err != nil {
 		return ret, err
@@ -261,7 +262,7 @@ func (r *VSphereVMReconciler) reconcileNormal(ctx context.Context, cluster *clus
 	return ctrl.Result{}, nil
 }
 
-func (r *VSphereVMReconciler) reconcileDelete(ctx context.Context, cluster *clusterv1.Cluster, _ *infrav1.VSphereCluster, machine *clusterv1.Machine, vSphereVM *infrav1.VSphereVM, conditionsTracker *infrav1.VSphereVM) (ctrl.Result, error) {
+func (r *VSphereVMReconciler) reconcileDelete(ctx context.Context, cluster *clusterv1beta1.Cluster, _ *infrav1.VSphereCluster, machine *clusterv1beta1.Machine, vSphereVM *infrav1.VSphereVM, conditionsTracker *infrav1.VSphereVM) (ctrl.Result, error) {
 	bootstrapReconciler := r.getVMBootstrapReconciler(vSphereVM)
 	if ret, err := bootstrapReconciler.reconcileDelete(ctx, cluster, machine, conditionsTracker); !ret.IsZero() || err != nil {
 		return ret, err
@@ -283,7 +284,7 @@ func (r *VSphereVMReconciler) getVMIpReconciler(vSphereCluster *infrav1.VSphereC
 		},
 		IsVMWaitingforIP: func() bool {
 			// A vSphereVM is waiting for an IP when not ready VMProvisioned condition is false with reason WaitingForIPAllocation
-			return !vSphereVM.Status.Ready && conditions.IsFalse(vSphereVM, infrav1.VMProvisionedCondition) && conditions.GetReason(vSphereVM, infrav1.VMProvisionedCondition) == infrav1.WaitingForIPAllocationReason
+			return !vSphereVM.Status.Ready && deprecatedconditions.IsFalse(vSphereVM, infrav1.VMProvisionedCondition) && deprecatedconditions.GetReason(vSphereVM, infrav1.VMProvisionedCondition) == infrav1.WaitingForIPAllocationReason
 		},
 		GetVMPath: func() string {
 			// Return vmref of the VM as it is populated already by CAPV
@@ -345,3 +346,52 @@ func (r *VSphereVMReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Man
 	}
 	return nil
 }
+
+// Reimplementation of some functions at "sigs.k8s.io/cluster-api/util" to be compatible with v1beta1.
+
+// getClusterV1Beta1FromMetadata returns the Cluster object (if present) using the object metadata.
+func getClusterV1Beta1FromMetadata(ctx context.Context, c client.Client, obj metav1.ObjectMeta) (*clusterv1beta1.Cluster, error) {
+	if obj.Labels[clusterv1beta1.ClusterNameLabel] == "" {
+		return nil, errors.WithStack(capiutil.ErrNoCluster)
+	}
+	return getClusterV1Beta1ByName(ctx, c, obj.Namespace, obj.Labels[clusterv1beta1.ClusterNameLabel])
+}
+
+// getClusterV1Beta1ByName finds and returns a Cluster object using the specified params.
+func getClusterV1Beta1ByName(ctx context.Context, c client.Client, namespace, name string) (*clusterv1beta1.Cluster, error) {
+	cluster := &clusterv1beta1.Cluster{}
+	key := client.ObjectKey{
+		Namespace: namespace,
+		Name:      name,
+	}
+
+	if err := c.Get(ctx, key, cluster); err != nil {
+		return nil, errors.Wrapf(err, "failed to get Cluster/%s", name)
+	}
+
+	return cluster, nil
+}
+
+// getOwnerMachineV1Beta1 returns the Machine object owning the current resource.
+func getOwnerMachineV1Beta1(ctx context.Context, c client.Client, obj metav1.ObjectMeta) (*clusterv1beta1.Machine, error) {
+	for _, ref := range obj.GetOwnerReferences() {
+		gv, err := schema.ParseGroupVersion(ref.APIVersion)
+		if err != nil {
+			return nil, err
+		}
+		if ref.Kind == "Machine" && gv.Group == clusterv1beta1.GroupVersion.Group {
+			return getMachineByName(ctx, c, obj.Namespace, ref.Name)
+		}
+	}
+	return nil, nil
+}
+
+// getMachineByName finds and returns a Machine object using the specified params.
+func getMachineByName(ctx context.Context, c client.Client, namespace, name string) (*clusterv1beta1.Machine, error) {
+	m := &clusterv1beta1.Machine{}
+	key := client.ObjectKey{Name: name, Namespace: namespace}
+	if err := c.Get(ctx, key, m); err != nil {
+		return nil, err
+	}
+	return m, nil
+}
diff --git a/test/infrastructure/vcsim/controllers/vspherevm_controller_test.go b/test/infrastructure/vcsim/controllers/vspherevm_controller_test.go
index ef4ea66e9e..c941756729 100644
--- a/test/infrastructure/vcsim/controllers/vspherevm_controller_test.go
+++ b/test/infrastructure/vcsim/controllers/vspherevm_controller_test.go
@@ -31,10 +31,11 @@ import (
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/klog/v2"
 	"k8s.io/utils/ptr"
-	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
+	clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
+	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2"
 	inmemoryruntime "sigs.k8s.io/cluster-api/test/infrastructure/inmemory/pkg/runtime"
 	inmemoryserver "sigs.k8s.io/cluster-api/test/infrastructure/inmemory/pkg/server"
-	"sigs.k8s.io/cluster-api/util/conditions"
+	deprecatedconditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions"
 	ctrl "sigs.k8s.io/controller-runtime"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 	"sigs.k8s.io/controller-runtime/pkg/client/fake"
@@ -185,9 +186,9 @@ func Test_Reconcile_VSphereVM(t *testing.T) {
 		err = inmemoryClient.Get(ctx, client.ObjectKeyFromObject(vSphereVM), conditionsTracker)
 		g.Expect(err).ToNot(HaveOccurred())
 
-		c := conditions.Get(conditionsTracker, VMProvisionedCondition)
+		c := deprecatedconditions.Get(conditionsTracker, VMProvisionedCondition)
 		g.Expect(c.Status).To(Equal(corev1.ConditionFalse))
-		g.Expect(c.Severity).To(Equal(clusterv1.ConditionSeverityInfo))
+		g.Expect(c.Severity).To(Equal(clusterv1beta1.ConditionSeverityInfo))
 		g.Expect(c.Reason).To(Equal(WaitingControlPlaneInitializedReason))
 	})
 
@@ -323,7 +324,7 @@ func Test_Reconcile_VSphereVM(t *testing.T) {
 		err = inmemoryClient.Get(ctx, client.ObjectKeyFromObject(vSphereVM), conditionsTracker)
 		g.Expect(err).ToNot(HaveOccurred())
 
-		c := conditions.Get(conditionsTracker, NodeProvisionedCondition)
+		c := deprecatedconditions.Get(conditionsTracker, NodeProvisionedCondition)
 		g.Expect(c.Status).To(Equal(corev1.ConditionTrue))
 	})
 }
diff --git a/test/infrastructure/vcsim/main.go b/test/infrastructure/vcsim/main.go
index 744702b3b9..1c8c56a757 100644
--- a/test/infrastructure/vcsim/main.go
+++ b/test/infrastructure/vcsim/main.go
@@ -41,7 +41,8 @@ import (
 	"k8s.io/component-base/logs"
 	logsv1 "k8s.io/component-base/logs/api/v1"
 	"k8s.io/klog/v2"
-	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
+	clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
+	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2"
 	"sigs.k8s.io/cluster-api/controllers/remote"
 	"sigs.k8s.io/cluster-api/feature"
 	inmemoryruntime "sigs.k8s.io/cluster-api/test/infrastructure/inmemory/pkg/runtime"
@@ -95,7 +96,7 @@ var (
 func init() {
 	// scheme used for operating on the management cluster.
 	_ = corev1.AddToScheme(scheme)
-	_ = clusterv1.AddToScheme(scheme)
+	_ = clusterv1beta1.AddToScheme(scheme)
 	_ = infrav1.AddToScheme(scheme)
 	_ = vcsimv1.AddToScheme(scheme)
 	_ = topologyv1.AddToScheme(scheme)