diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index 77b7577a3e..712f3faed0 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -25,14 +25,14 @@ jobs: # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@v3 + uses: github/codeql-action/init@v4 with: languages: ${{ matrix.language }} # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). # If this step fails, then you should remove it and run the build manually (see below) - name: Autobuild - uses: github/codeql-action/autobuild@v3 + uses: github/codeql-action/autobuild@v4 - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@v3 + uses: github/codeql-action/analyze@v4 diff --git a/.github/workflows/pr-golangci-lint.yaml b/.github/workflows/pr-golangci-lint.yaml index 563f6c4e4c..a8b0a48df2 100644 --- a/.github/workflows/pr-golangci-lint.yaml +++ b/.github/workflows/pr-golangci-lint.yaml @@ -26,9 +26,9 @@ jobs: with: go-version: ${{ steps.vars.outputs.go_version }} - name: golangci-lint - uses: golangci/golangci-lint-action@4afd733a84b1f43292c63897423277bb7f4313a9 # tag=v8.0.0 + uses: golangci/golangci-lint-action@0a35821d5c230e903fcfe077583637dea1b27b47 # tag=v9.0.0 with: - version: v2.1.0 + version: v2.7.0 working-directory: ${{matrix.working-directory}} - name: Lint API run: make lint-api diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index 4bf9114bbe..52b5c4682f 100644 --- a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -20,7 +20,7 @@ jobs: - name: Set up Go uses: actions/setup-go@v6 with: - go-version: '1.23' + go-version: '1.24' - name: Set version info run: | echo "VERSION=${GITHUB_REF_NAME}" >> $GITHUB_ENV diff --git a/.golangci.yml b/.golangci.yml index d87ebbdf39..049326f622 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -139,8 +139,6 @@ linters: alias: runtimeserializer - pkg: k8s.io/apimachinery/pkg/runtime/serializer/yaml alias: yamlserializer - - pkg: sigs.k8s.io/cluster-api/api/v1beta1 - alias: clusterv1 - pkg: sigs.k8s.io/cluster-api/util/defaulting alias: utildefaulting - pkg: sigs.k8s.io/controller-runtime @@ -169,8 +167,14 @@ linters: alias: crclient - pkg: k8s.io/apimachinery/pkg/types alias: apimachinerytypes - - pkg: sigs.k8s.io/cluster-api/exp/api/v1beta1 - alias: expclusterv1 + - pkg: "sigs.k8s.io/cluster-api/api/core/v1beta2" + alias: clusterv1 + - pkg: "sigs.k8s.io/cluster-api/api/core/v1beta1" + alias: clusterv1beta1 + - pkg: "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/patch" + alias: v1beta1patch + - pkg: "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" + alias: v1beta1conditions no-unaliased: false nolintlint: require-specific: true @@ -212,16 +216,16 @@ linters: # - errcheck # text: Error return value of `outputPrinter.Print` is not checked - linters: - - gosec - text: 'G103: Use of unsafe calls should be audited' + - gosec + text: "G103: Use of unsafe calls should be audited" path: .*(api|types|test)\/.*\/.*conversion.*\.go$ - linters: - staticcheck - text: 'QF1008: could remove embedded field .*' + text: "QF1008: could remove embedded field .*" # TODO: change to use time.Time.Equal - linters: - staticcheck - text: 'QF1009: probably want to use time.Time.Equal instead' + text: "QF1009: probably want to use time.Time.Equal instead" - linters: - revive # Ignoring stylistic checks for generated code @@ -233,13 +237,13 @@ linters: # Ignoring stylistic checks for 
generated code path: .*(api|types)\/.*\/.*conversion.*\.go$ # By convention, receiver names in a method should reflect their identity. - text: 'receiver-naming: receiver name (.+) should be consistent with previous receiver name (.+)' + text: "receiver-naming: receiver name (.+) should be consistent with previous receiver name (.+)" - linters: - revive # Ignoring stylistic checks for generated code path: .*(api|types|test)\/.*\/.*conversion.*\.go$ # Checking if an error is nil to just after return the error or nil is redundant - text: 'if-return: redundant if ...; err != nil check, just return error instead' + text: "if-return: redundant if ...; err != nil check, just return error instead" - linters: - revive text: 'exported: exported method .*\.(Reconcile|SetupWithManager|SetupWebhookWithManager) should have comment or be unexported' @@ -268,7 +272,7 @@ linters: text: Error return value of (.+) is not checked - linters: - gosec - text: 'G108: Profiling endpoint is automatically exposed on /debug/pprof' + text: "G108: Profiling endpoint is automatically exposed on /debug/pprof" - linters: - godot path: (.*)/(v1beta1|v1beta2)/(.*)types.go @@ -282,17 +286,49 @@ linters: - linters: - revive path: .*/defaults.go - text: 'var-naming: don''t use underscores in Go names; func (.+) should be (.+)' + text: "var-naming: don't use underscores in Go names; func (.+) should be (.+)" - linters: - revive path: .*/.*(mock|gc_).*/.+\.go - text: 'var-naming: don''t use an underscore in package name' + text: "var-naming: don't use an underscore in package name" - linters: - revive # Ignoring stylistic checks for generated code path: .*(api|types|test)\/.*\/.*conversion.*\.go$ # This rule warns when initialism, variable or package naming conventions are not followed. - text: 'var-naming: don''t use underscores in Go names' + text: "var-naming: don't use underscores in Go names" + - linters: + - revive + path: 'exp/utils/*' + text: 'var-naming: avoid meaningless package names' + - linters: + - revive + path: 'cmd/clusterawsadm/cmd/ami/common' + text: 'var-naming: avoid meaningless package names' + - linters: + - revive + path: 'cmd/clusterawsadm/cmd/util' + text: 'var-naming: avoid meaningless package names' + - linters: + - revive + path: 'pkg/utils' + text: 'var-naming: avoid meaningless package names' + - linters: + - revive + path: 'pkg/cloud/services/common/' + text: 'var-naming: avoid meaningless package names' + - linters: + - revive + path: 'test/e2e/shared/' + text: 'var-naming: avoid meaningless package names' + - linters: + - revive + text: 'avoid package names that conflict with Go standard library package names' + path: 'pkg/internal/bytes/' + - linters: + - revive + text: 'avoid package names that conflict with Go standard library package names' + path: 'pkg/hash/' - linters: - unparam text: always receives @@ -309,20 +345,26 @@ linters: text: cyclomatic complexity - linters: - gocritic - text: 'appendAssign: append result not assigned to the same slice' + text: "appendAssign: append result not assigned to the same slice" - path: (.+)\.go$ text: (Expect directory permissions to be 0750 or less|Expect file permissions to be 0600 or less) - path: (.+)\.go$ - text: 'exported: (func|type) name will be used as (.+) by other packages, and that stutters; consider calling this (.+)' + text: "exported: (func|type) name will be used as (.+) by other packages, and that stutters; consider calling this (.+)" - path: (.+)\.go$ text: (G104|G107|G404|G505|ST1000) - path: (.+)\.go$ - text: 'G108: Profiling 
endpoint is automatically exposed on /debug/pprof' + text: "G108: Profiling endpoint is automatically exposed on /debug/pprof" - path: (.+)\.go$ text: net/http.Get must not be called - linters: - goconst path: (.+)_test\.go + - linters: + - staticcheck + text: 'SA1019: "sigs.k8s.io/cluster-api/(.*)" is deprecated: This package is deprecated and is going to be removed when support for v1beta1 will be dropped.' + - linters: + - staticcheck + text: "s.scope.ControlPlaneLoadBalancer is deprecated" paths: - third_party$ - builtin$ diff --git a/Makefile b/Makefile index b820793ba8..94c83e8331 100644 --- a/Makefile +++ b/Makefile @@ -21,6 +21,7 @@ include $(ROOT_DIR_RELATIVE)/common.mk # Go GO_VERSION ?=1.24.7 +GO_DIRECTIVE_VERSION ?= 1.24.0 GO_CONTAINER_IMAGE ?= golang:$(GO_VERSION) # Directories. @@ -170,7 +171,7 @@ ifeq ($(findstring \[PR-Blocking\],$(GINKGO_FOCUS)),\[PR-Blocking\]) endif override E2E_ARGS += -artifacts-folder="$(ARTIFACTS)" --data-folder="$(E2E_DATA_DIR)" -use-existing-cluster=$(USE_EXISTING_CLUSTER) -override GINKGO_ARGS += -v --trace --timeout=4h --output-dir="$(ARTIFACTS)" --junit-report="junit.e2e_suite.xml" +override GINKGO_ARGS += -v --trace --timeout=5h --output-dir="$(ARTIFACTS)" --junit-report="junit.e2e_suite.xml" ifdef GINKGO_SKIP override GINKGO_ARGS += -skip "$(GINKGO_SKIP)" @@ -204,7 +205,7 @@ endif .PHONY: defaulters defaulters: $(DEFAULTER_GEN) ## Generate all Go types $(DEFAULTER_GEN) \ - --extra-peer-dirs=sigs.k8s.io/cluster-api/api/v1beta1 \ + --extra-peer-dirs=sigs.k8s.io/cluster-api/api/core/v1beta2 \ --v=0 \ --go-header-file=./hack/boilerplate/boilerplate.generatego.txt \ --output-file=zz_generated.defaults.go \ @@ -262,7 +263,7 @@ generate-go-apis: ## Alias for .build/generate-go-apis $(MAKE) defaulters $(CONVERSION_GEN) \ - --extra-peer-dirs=sigs.k8s.io/cluster-api/api/v1beta1 \ + --extra-peer-dirs=sigs.k8s.io/cluster-api/api/core/v1beta2 \ --output-file=zz_generated.conversion.go \ --go-header-file=./hack/boilerplate/boilerplate.generatego.txt \ ./api/v1beta1 \ @@ -270,28 +271,28 @@ generate-go-apis: ## Alias for .build/generate-go-apis $(CONVERSION_GEN) \ --extra-peer-dirs=sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta1 \ - --extra-peer-dirs=sigs.k8s.io/cluster-api/api/v1beta1 \ + --extra-peer-dirs=sigs.k8s.io/cluster-api/api/core/v1beta2 \ --output-file=zz_generated.conversion.go \ --go-header-file=./hack/boilerplate/boilerplate.generatego.txt \ ./$(EXP_DIR)/api/v1beta1 $(CONVERSION_GEN) \ --extra-peer-dirs=sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta1 \ - --extra-peer-dirs=sigs.k8s.io/cluster-api/api/v1beta1 \ + --extra-peer-dirs=sigs.k8s.io/cluster-api/api/core/v1beta2 \ --output-file=zz_generated.conversion.go \ --go-header-file=./hack/boilerplate/boilerplate.generatego.txt \ ./bootstrap/eks/api/v1beta1 $(CONVERSION_GEN) \ --extra-peer-dirs=sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta1 \ - --extra-peer-dirs=sigs.k8s.io/cluster-api/api/v1beta1 \ + --extra-peer-dirs=sigs.k8s.io/cluster-api/api/core/v1beta2 \ --output-file=zz_generated.conversion.go \ --go-header-file=./hack/boilerplate/boilerplate.generatego.txt \ ./controlplane/eks/api/v1beta1 $(CONVERSION_GEN) \ --extra-peer-dirs=sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta1 \ - --extra-peer-dirs=sigs.k8s.io/cluster-api/api/v1beta1 \ + --extra-peer-dirs=sigs.k8s.io/cluster-api/api/core/v1beta2 \ --output-file=zz_generated.conversion.go \ --go-header-file=./hack/boilerplate/boilerplate.generatego.txt \ ./controlplane/rosa/api/v1beta2 @@ -329,7 +330,7 @@ modules: ## Runs go 
mod to ensure proper vendoring. cd $(TOOLS_DIR); go mod tidy .PHONY: verify ## Verify ties together the rest of the verification targets into one target -verify: verify-boilerplate verify-modules verify-gen verify-conversions verify-shellcheck verify-book-links release-manifests +verify: verify-boilerplate verify-modules verify-gen verify-conversions verify-shellcheck verify-book-links release-manifests verify-go-directive .PHONY: verify-boilerplate verify-boilerplate: ## Verify boilerplate @@ -367,6 +368,12 @@ verify-gen: generate ## Verify generated files verify-container-images: ## Verify container images TRACE=$(TRACE) ./hack/verify-container-images.sh +.PHONY: verify-go-directive +verify-go-directive: + # use the core Cluster API script directly to verify the go directive matches the desired one. + # ref: https://github.com/kubernetes-sigs/cluster-api/blob/v1.10.7/hack/verify-go-directive.sh + curl --retry 3 -fsL https://raw.githubusercontent.com/kubernetes-sigs/cluster-api/refs/tags/v1.10.7/hack/verify-go-directive.sh | bash -s -- -g $(GO_DIRECTIVE_VERSION) + .PHONY: apidiff apidiff: APIDIFF_OLD_COMMIT ?= $(shell git rev-parse origin/main) apidiff: $(GO_APIDIFF) ## Check for API differences diff --git a/OWNERS b/OWNERS index 68a6ef1f91..4a3ed8834e 100644 --- a/OWNERS +++ b/OWNERS @@ -11,6 +11,7 @@ reviewers: - cluster-api-aws-reviewers emeritus_approvers: + - Ankitasw - AverageMarcus - chuckha - detiber diff --git a/OWNERS_ALIASES b/OWNERS_ALIASES index 6c1b8e17f4..315fec1bbf 100644 --- a/OWNERS_ALIASES +++ b/OWNERS_ALIASES @@ -10,13 +10,10 @@ aliases: - richardcase cluster-api-aws-maintainers: - richardcase - - Ankitasw - dlipovetsky - nrb - AndiDog + - damdo cluster-api-aws-reviewers: - - luthermonson - - cnmcavoy - faiq - fiunchinho - - damdo diff --git a/PROJECT b/PROJECT index 44c9df3c2c..e160fea746 100644 --- a/PROJECT +++ b/PROJECT @@ -58,3 +58,9 @@ resources: - group: infrastructure version: v1beta2 kind: AWSManagedCluster +- group: infrastructure + kind: ROSARoleConfig + version: v1beta2 +- group: infrastructure + kind: ROSANetwork + version: v1beta2 diff --git a/api/v1beta1/awscluster_conversion.go b/api/v1beta1/awscluster_conversion.go index a201fd6935..805d60856e 100644 --- a/api/v1beta1/awscluster_conversion.go +++ b/api/v1beta1/awscluster_conversion.go @@ -67,6 +67,9 @@ func (src *AWSCluster) ConvertTo(dstRaw conversion.Hub) error { dst.Status.Bastion.HostID = restored.Status.Bastion.HostID dst.Status.Bastion.CapacityReservationPreference = restored.Status.Bastion.CapacityReservationPreference dst.Status.Bastion.CPUOptions = restored.Status.Bastion.CPUOptions + if restored.Status.Bastion.DynamicHostAllocation != nil { + dst.Status.Bastion.DynamicHostAllocation = restored.Status.Bastion.DynamicHostAllocation + } } dst.Spec.Partition = restored.Spec.Partition diff --git a/api/v1beta1/awscluster_types.go b/api/v1beta1/awscluster_types.go index ddb1d2cd5a..0c258d7cf6 100644 --- a/api/v1beta1/awscluster_types.go +++ b/api/v1beta1/awscluster_types.go @@ -19,7 +19,7 @@ package v1beta1 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) const ( @@ -45,7 +45,7 @@ type AWSClusterSpec struct { // ControlPlaneEndpoint represents the endpoint used to communicate with the control plane. 
// +optional - ControlPlaneEndpoint clusterv1.APIEndpoint `json:"controlPlaneEndpoint"` + ControlPlaneEndpoint clusterv1beta1.APIEndpoint `json:"controlPlaneEndpoint"` // AdditionalTags is an optional set of tags to add to AWS resources managed by the AWS provider, in addition to the // ones added by default. @@ -200,11 +200,11 @@ type AWSLoadBalancerSpec struct { // AWSClusterStatus defines the observed state of AWSCluster. type AWSClusterStatus struct { // +kubebuilder:default=false - Ready bool `json:"ready"` - Network NetworkStatus `json:"networkStatus,omitempty"` - FailureDomains clusterv1.FailureDomains `json:"failureDomains,omitempty"` - Bastion *Instance `json:"bastion,omitempty"` - Conditions clusterv1.Conditions `json:"conditions,omitempty"` + Ready bool `json:"ready"` + Network NetworkStatus `json:"networkStatus,omitempty"` + FailureDomains clusterv1beta1.FailureDomains `json:"failureDomains,omitempty"` + Bastion *Instance `json:"bastion,omitempty"` + Conditions clusterv1beta1.Conditions `json:"conditions,omitempty"` } // S3Bucket defines a supporting S3 bucket for the cluster, currently can be optionally used for Ignition. @@ -254,12 +254,12 @@ type AWSClusterList struct { } // GetConditions returns the observations of the operational state of the AWSCluster resource. -func (r *AWSCluster) GetConditions() clusterv1.Conditions { +func (r *AWSCluster) GetConditions() clusterv1beta1.Conditions { return r.Status.Conditions } -// SetConditions sets the underlying service state of the AWSCluster to the predescribed clusterv1.Conditions. -func (r *AWSCluster) SetConditions(conditions clusterv1.Conditions) { +// SetConditions sets the underlying service state of the AWSCluster to the prescribed clusterv1beta1.Conditions. +func (r *AWSCluster) SetConditions(conditions clusterv1beta1.Conditions) { r.Status.Conditions = conditions } diff --git a/api/v1beta1/awsclustertemplate_types.go b/api/v1beta1/awsclustertemplate_types.go index 07e2cf4039..1b0d1f100f 100644 --- a/api/v1beta1/awsclustertemplate_types.go +++ b/api/v1beta1/awsclustertemplate_types.go @@ -19,7 +19,7 @@ package v1beta1 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) // AWSClusterTemplateSpec defines the desired state of AWSClusterTemplate. @@ -58,6 +58,6 @@ type AWSClusterTemplateResource struct { // Standard object's metadata.
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - ObjectMeta clusterv1.ObjectMeta `json:"metadata,omitempty"` - Spec AWSClusterSpec `json:"spec"` + ObjectMeta clusterv1beta1.ObjectMeta `json:"metadata,omitempty"` + Spec AWSClusterSpec `json:"spec"` } diff --git a/api/v1beta1/awsmachine_conversion.go b/api/v1beta1/awsmachine_conversion.go index e809b649b2..6e87547918 100644 --- a/api/v1beta1/awsmachine_conversion.go +++ b/api/v1beta1/awsmachine_conversion.go @@ -49,6 +49,9 @@ func (src *AWSMachine) ConvertTo(dstRaw conversion.Hub) error { dst.Spec.CapacityReservationPreference = restored.Spec.CapacityReservationPreference dst.Spec.NetworkInterfaceType = restored.Spec.NetworkInterfaceType dst.Spec.CPUOptions = restored.Spec.CPUOptions + if restored.Spec.DynamicHostAllocation != nil { + dst.Spec.DynamicHostAllocation = restored.Spec.DynamicHostAllocation + } if restored.Spec.ElasticIPPool != nil { if dst.Spec.ElasticIPPool == nil { dst.Spec.ElasticIPPool = &infrav1.ElasticIPPool{} @@ -61,6 +64,7 @@ func (src *AWSMachine) ConvertTo(dstRaw conversion.Hub) error { } } + dst.Status.DedicatedHost = restored.Status.DedicatedHost return nil } @@ -117,6 +121,9 @@ func (r *AWSMachineTemplate) ConvertTo(dstRaw conversion.Hub) error { dst.Spec.Template.Spec.CapacityReservationPreference = restored.Spec.Template.Spec.CapacityReservationPreference dst.Spec.Template.Spec.NetworkInterfaceType = restored.Spec.Template.Spec.NetworkInterfaceType dst.Spec.Template.Spec.CPUOptions = restored.Spec.Template.Spec.CPUOptions + if restored.Spec.Template.Spec.DynamicHostAllocation != nil { + dst.Spec.Template.Spec.DynamicHostAllocation = restored.Spec.Template.Spec.DynamicHostAllocation + } if restored.Spec.Template.Spec.ElasticIPPool != nil { if dst.Spec.Template.Spec.ElasticIPPool == nil { dst.Spec.Template.Spec.ElasticIPPool = &infrav1.ElasticIPPool{} @@ -129,6 +136,10 @@ func (r *AWSMachineTemplate) ConvertTo(dstRaw conversion.Hub) error { } } + // Restore Status fields that don't exist in v1beta1. + dst.Status.NodeInfo = restored.Status.NodeInfo + dst.Status.Conditions = restored.Status.Conditions + return nil } diff --git a/api/v1beta1/awsmachine_types.go b/api/v1beta1/awsmachine_types.go index 25a8cb4dcd..d6bf89d1ea 100644 --- a/api/v1beta1/awsmachine_types.go +++ b/api/v1beta1/awsmachine_types.go @@ -19,7 +19,7 @@ package v1beta1 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) const ( @@ -207,7 +207,7 @@ type AWSMachineStatus struct { Interruptible bool `json:"interruptible,omitempty"` // Addresses contains the AWS instance associated addresses. - Addresses []clusterv1.MachineAddress `json:"addresses,omitempty"` + Addresses []clusterv1beta1.MachineAddress `json:"addresses,omitempty"` // InstanceState is the state of the AWS instance for this machine. // +optional @@ -253,7 +253,7 @@ type AWSMachineStatus struct { // Conditions defines current service state of the AWSMachine. // +optional - Conditions clusterv1.Conditions `json:"conditions,omitempty"` + Conditions clusterv1beta1.Conditions `json:"conditions,omitempty"` } // +kubebuilder:object:root=true @@ -276,12 +276,12 @@ type AWSMachine struct { } // GetConditions returns the observations of the operational state of the AWSMachine resource. 
-func (r *AWSMachine) GetConditions() clusterv1.Conditions { +func (r *AWSMachine) GetConditions() clusterv1beta1.Conditions { return r.Status.Conditions } -// SetConditions sets the underlying service state of the AWSMachine to the predescribed clusterv1.Conditions. -func (r *AWSMachine) SetConditions(conditions clusterv1.Conditions) { +// SetConditions sets the underlying service state of the AWSMachine to the prescribed clusterv1beta1.Conditions. +func (r *AWSMachine) SetConditions(conditions clusterv1beta1.Conditions) { r.Status.Conditions = conditions } diff --git a/api/v1beta1/awsmachinetemplate_types.go b/api/v1beta1/awsmachinetemplate_types.go index 6e86295c6b..6e1a98fdbc 100644 --- a/api/v1beta1/awsmachinetemplate_types.go +++ b/api/v1beta1/awsmachinetemplate_types.go @@ -20,7 +20,7 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) // AWSMachineTemplateStatus defines a status for an AWSMachineTemplate. @@ -65,7 +65,7 @@ type AWSMachineTemplateResource struct { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - ObjectMeta clusterv1.ObjectMeta `json:"metadata,omitempty"` + ObjectMeta clusterv1beta1.ObjectMeta `json:"metadata,omitempty"` // Spec is the specification of the desired behavior of the machine. Spec AWSMachineSpec `json:"spec"` diff --git a/api/v1beta1/conditions_consts.go b/api/v1beta1/conditions_consts.go index ae5d761df1..5344719470 100644 --- a/api/v1beta1/conditions_consts.go +++ b/api/v1beta1/conditions_consts.go @@ -16,19 +16,19 @@ limitations under the License. package v1beta1 -import clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" +import clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" const ( // PrincipalCredentialRetrievedCondition reports on whether Principal credentials could be retrieved successfully. // A possible scenario, where retrieval is unsuccessful, is when SourcePrincipal is not authorized for assume role. - PrincipalCredentialRetrievedCondition clusterv1.ConditionType = "PrincipalCredentialRetrieved" + PrincipalCredentialRetrievedCondition clusterv1beta1.ConditionType = "PrincipalCredentialRetrieved" // PrincipalCredentialRetrievalFailedReason used when errors occur during identity credential retrieval. PrincipalCredentialRetrievalFailedReason = "PrincipalCredentialRetrievalFailed" // CredentialProviderBuildFailedReason used when errors occur during building providers before trying credential retrieval. //nolint:gosec CredentialProviderBuildFailedReason = "CredentialProviderBuildFailed" // PrincipalUsageAllowedCondition reports on whether Principal and all the nested source identities are allowed to be used in the AWSCluster namespace. - PrincipalUsageAllowedCondition clusterv1.ConditionType = "PrincipalUsageAllowed" + PrincipalUsageAllowedCondition clusterv1beta1.ConditionType = "PrincipalUsageAllowed" // PrincipalUsageUnauthorizedReason used when AWSCluster namespace is not in the identity's allowed namespaces list. PrincipalUsageUnauthorizedReason = "PrincipalUsageUnauthorized" // SourcePrincipalUsageUnauthorizedReason used when AWSCluster is not in the intersection of source identity allowed namespaces @@ -38,7 +38,7 @@ const ( // VpcReadyCondition reports on the successful reconciliation of a VPC.
- VpcReadyCondition clusterv1.ConditionType = "VpcReady" + VpcReadyCondition clusterv1beta1.ConditionType = "VpcReady" // VpcCreationStartedReason used when attempting to create a VPC for a managed cluster. // Will not be applied to unmanaged clusters. VpcCreationStartedReason = "VpcCreationStarted" @@ -48,7 +48,7 @@ const ( const ( // SubnetsReadyCondition reports on the successful reconciliation of subnets. - SubnetsReadyCondition clusterv1.ConditionType = "SubnetsReady" + SubnetsReadyCondition clusterv1beta1.ConditionType = "SubnetsReady" // SubnetsReconciliationFailedReason used to report failures while reconciling subnets. SubnetsReconciliationFailedReason = "SubnetsReconciliationFailed" ) @@ -56,7 +56,7 @@ const ( const ( // InternetGatewayReadyCondition reports on the successful reconciliation of internet gateways. // Only applicable to managed clusters. - InternetGatewayReadyCondition clusterv1.ConditionType = "InternetGatewayReady" + InternetGatewayReadyCondition clusterv1beta1.ConditionType = "InternetGatewayReady" // InternetGatewayFailedReason used when errors occur during internet gateway reconciliation. InternetGatewayFailedReason = "InternetGatewayFailed" ) @@ -64,7 +64,7 @@ const ( const ( // EgressOnlyInternetGatewayReadyCondition reports on the successful reconciliation of egress only internet gateways. // Only applicable to managed clusters. - EgressOnlyInternetGatewayReadyCondition clusterv1.ConditionType = "EgressOnlyInternetGatewayReady" + EgressOnlyInternetGatewayReadyCondition clusterv1beta1.ConditionType = "EgressOnlyInternetGatewayReady" // EgressOnlyInternetGatewayFailedReason used when errors occur during egress only internet gateway reconciliation. EgressOnlyInternetGatewayFailedReason = "EgressOnlyInternetGatewayFailed" ) @@ -72,7 +72,7 @@ const ( const ( // NatGatewaysReadyCondition reports successful reconciliation of NAT gateways. // Only applicable to managed clusters. - NatGatewaysReadyCondition clusterv1.ConditionType = "NatGatewaysReady" + NatGatewaysReadyCondition clusterv1beta1.ConditionType = "NatGatewaysReady" // NatGatewaysCreationStartedReason set once when creating new NAT gateways. NatGatewaysCreationStartedReason = "NatGatewaysCreationStarted" // NatGatewaysReconciliationFailedReason used when any errors occur during reconciliation of NAT gateways. @@ -82,7 +82,7 @@ const ( const ( // RouteTablesReadyCondition reports successful reconciliation of route tables. // Only applicable to managed clusters. - RouteTablesReadyCondition clusterv1.ConditionType = "RouteTablesReady" + RouteTablesReadyCondition clusterv1beta1.ConditionType = "RouteTablesReady" // RouteTableReconciliationFailedReason used when any errors occur during reconciliation of route tables. RouteTableReconciliationFailedReason = "RouteTableReconciliationFailed" ) @@ -90,14 +90,14 @@ const ( const ( // SecondaryCidrsReadyCondition reports successful reconciliation of secondary CIDR blocks. // Only applicable to managed clusters. - SecondaryCidrsReadyCondition clusterv1.ConditionType = "SecondaryCidrsReady" + SecondaryCidrsReadyCondition clusterv1beta1.ConditionType = "SecondaryCidrsReady" // SecondaryCidrReconciliationFailedReason used when any errors occur during reconciliation of secondary CIDR blocks. SecondaryCidrReconciliationFailedReason = "SecondaryCidrReconciliationFailed" ) const ( // ClusterSecurityGroupsReadyCondition reports successful reconciliation of security groups. 
- ClusterSecurityGroupsReadyCondition clusterv1.ConditionType = "ClusterSecurityGroupsReady" + ClusterSecurityGroupsReadyCondition clusterv1beta1.ConditionType = "ClusterSecurityGroupsReady" // ClusterSecurityGroupReconciliationFailedReason used when any errors occur during reconciliation of security groups. ClusterSecurityGroupReconciliationFailedReason = "SecurityGroupReconciliationFailed" ) @@ -105,7 +105,7 @@ const ( const ( // BastionHostReadyCondition reports whether a bastion host is ready. Depending on the configuration, a cluster // may not require a bastion host and this condition will be skipped. - BastionHostReadyCondition clusterv1.ConditionType = "BastionHostReady" + BastionHostReadyCondition clusterv1beta1.ConditionType = "BastionHostReady" // BastionCreationStartedReason used when creating a new bastion host. BastionCreationStartedReason = "BastionCreationStarted" // BastionHostFailedReason used when an error occurs during the creation of a bastion host. @@ -114,7 +114,7 @@ const ( const ( // LoadBalancerReadyCondition reports on whether a control plane load balancer was successfully reconciled. - LoadBalancerReadyCondition clusterv1.ConditionType = "LoadBalancerReady" + LoadBalancerReadyCondition clusterv1beta1.ConditionType = "LoadBalancerReady" // WaitForDNSNameReason used while waiting for a DNS name for the API server to be populated. WaitForDNSNameReason = "WaitForDNSName" // WaitForDNSNameResolveReason used while waiting for DNS name to resolve. @@ -125,7 +125,7 @@ const ( const ( // InstanceReadyCondition reports on current status of the EC2 instance. Ready indicates the instance is in a Running state. - InstanceReadyCondition clusterv1.ConditionType = "InstanceReady" + InstanceReadyCondition clusterv1beta1.ConditionType = "InstanceReady" // InstanceNotFoundReason used when the instance couldn't be retrieved. InstanceNotFoundReason = "InstanceNotFound" @@ -147,7 +147,7 @@ const ( const ( // SecurityGroupsReadyCondition indicates the security groups are up to date on the AWSMachine. - SecurityGroupsReadyCondition clusterv1.ConditionType = "SecurityGroupsReady" + SecurityGroupsReadyCondition clusterv1beta1.ConditionType = "SecurityGroupsReady" // SecurityGroupsFailedReason used when the security groups could not be synced. SecurityGroupsFailedReason = "SecurityGroupsSyncFailed" @@ -158,7 +158,7 @@ const ( // When set to false, severity can be an Error if the subnet is not found or unavailable in the instance's AZ. // Note this is only applicable to control plane machines. // Only applicable to control plane machines. - ELBAttachedCondition clusterv1.ConditionType = "ELBAttached" + ELBAttachedCondition clusterv1beta1.ConditionType = "ELBAttached" // ELBAttachFailedReason used when a control plane node fails to attach to the ELB. ELBAttachFailedReason = "ELBAttachFailed" @@ -168,7 +168,7 @@ const ( const ( // S3BucketReadyCondition indicates an S3 bucket has been created successfully. - S3BucketReadyCondition clusterv1.ConditionType = "S3BucketCreated" + S3BucketReadyCondition clusterv1beta1.ConditionType = "S3BucketCreated" // S3BucketFailedReason is used when any errors occur during reconciliation of an S3 bucket. 
S3BucketFailedReason = "S3BucketCreationFailed" diff --git a/api/v1beta1/conversion.go b/api/v1beta1/conversion.go index 6247cfeab1..a2a895bfd1 100644 --- a/api/v1beta1/conversion.go +++ b/api/v1beta1/conversion.go @@ -103,3 +103,13 @@ func Convert_v1beta2_S3Bucket_To_v1beta1_S3Bucket(in *v1beta2.S3Bucket, out *S3B func Convert_v1beta2_Ignition_To_v1beta1_Ignition(in *v1beta2.Ignition, out *Ignition, s conversion.Scope) error { return autoConvert_v1beta2_Ignition_To_v1beta1_Ignition(in, out, s) } + +func Convert_v1beta2_AWSMachineStatus_To_v1beta1_AWSMachineStatus(in *v1beta2.AWSMachineStatus, out *AWSMachineStatus, s conversion.Scope) error { + // Note: DedicatedHostID is not present in v1beta1, so it will be dropped during conversion + return autoConvert_v1beta2_AWSMachineStatus_To_v1beta1_AWSMachineStatus(in, out, s) +} + +func Convert_v1beta2_AWSMachineTemplateStatus_To_v1beta1_AWSMachineTemplateStatus(in *v1beta2.AWSMachineTemplateStatus, out *AWSMachineTemplateStatus, s conversion.Scope) error { + // NodeInfo and Conditions fields are ignored (dropped) as they don't exist in v1beta1 + return autoConvert_v1beta2_AWSMachineTemplateStatus_To_v1beta1_AWSMachineTemplateStatus(in, out, s) +} diff --git a/api/v1beta1/conversion_test.go b/api/v1beta1/conversion_test.go index c0d261f219..b2bcdfb93d 100644 --- a/api/v1beta1/conversion_test.go +++ b/api/v1beta1/conversion_test.go @@ -19,11 +19,11 @@ package v1beta1 import ( "testing" - fuzz "github.com/google/gofuzz" . "github.com/onsi/gomega" "k8s.io/apimachinery/pkg/api/apitesting/fuzzer" "k8s.io/apimachinery/pkg/runtime" runtimeserializer "k8s.io/apimachinery/pkg/runtime/serializer" + "sigs.k8s.io/randfill" "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" utilconversion "sigs.k8s.io/cluster-api/util/conversion" @@ -36,8 +36,8 @@ func fuzzFuncs(_ runtimeserializer.CodecFactory) []interface{} { } } -func AWSMachineFuzzer(obj *AWSMachine, c fuzz.Continue) { - c.FuzzNoCustom(obj) +func AWSMachineFuzzer(obj *AWSMachine, c randfill.Continue) { + c.FillNoCustom(obj) // AWSMachine.Spec.FailureDomain, AWSMachine.Spec.Subnet.ARN and AWSMachine.Spec.AdditionalSecurityGroups.ARN has been removed in v1beta2, so setting it to nil in order to avoid v1beta1 --> v1beta2 --> v1beta1 round trip errors. if obj.Spec.Subnet != nil { @@ -52,8 +52,8 @@ func AWSMachineFuzzer(obj *AWSMachine, c fuzz.Continue) { obj.Spec.FailureDomain = nil } -func AWSMachineTemplateFuzzer(obj *AWSMachineTemplate, c fuzz.Continue) { - c.FuzzNoCustom(obj) +func AWSMachineTemplateFuzzer(obj *AWSMachineTemplate, c randfill.Continue) { + c.FillNoCustom(obj) // AWSMachineTemplate.Spec.Template.Spec.FailureDomain, AWSMachineTemplate.Spec.Template.Spec.Subnet.ARN and AWSMachineTemplate.Spec.Template.Spec.AdditionalSecurityGroups.ARN has been removed in v1beta2, so setting it to nil in order to avoid v1beta1 --> v1beta2 --> v1beta round trip errors. if obj.Spec.Template.Spec.Subnet != nil { diff --git a/api/v1beta1/tags.go b/api/v1beta1/tags.go index a727d39cf4..1d711937a0 100644 --- a/api/v1beta1/tags.go +++ b/api/v1beta1/tags.go @@ -24,7 +24,7 @@ import ( "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/validation/field" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" ) // Tags defines a map of tags. 
diff --git a/api/v1beta1/types.go b/api/v1beta1/types.go index fe6510380b..5002243fd4 100644 --- a/api/v1beta1/types.go +++ b/api/v1beta1/types.go @@ -19,7 +19,7 @@ package v1beta1 import ( "k8s.io/apimachinery/pkg/util/sets" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) // AWSResourceReference is a reference to a specific AWS resource by ID or filters. @@ -32,6 +32,7 @@ type AWSResourceReference struct { // ARN of resource. // +optional + // // Deprecated: This field has no function and is going to be removed in the next release. ARN *string `json:"arn,omitempty"` @@ -165,7 +166,7 @@ type Instance struct { IAMProfile string `json:"iamProfile,omitempty"` // Addresses contains the AWS instance associated addresses. - Addresses []clusterv1.MachineAddress `json:"addresses,omitempty"` + Addresses []clusterv1beta1.MachineAddress `json:"addresses,omitempty"` // The private IPv4 address assigned to the instance. PrivateIP *string `json:"privateIp,omitempty"` diff --git a/api/v1beta1/zz_generated.conversion.go b/api/v1beta1/zz_generated.conversion.go index 9c7a33e9fb..44f2227f38 100644 --- a/api/v1beta1/zz_generated.conversion.go +++ b/api/v1beta1/zz_generated.conversion.go @@ -29,7 +29,7 @@ import ( conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" v1beta2 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" - apiv1beta1 "sigs.k8s.io/cluster-api/api/v1beta1" + corev1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) func init() { @@ -144,11 +144,6 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta2.AWSClusterSpec)(nil), (*AWSClusterSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_AWSClusterSpec_To_v1beta1_AWSClusterSpec(a.(*v1beta2.AWSClusterSpec), b.(*AWSClusterSpec), scope) - }); err != nil { - return err - } if err := s.AddGeneratedConversionFunc((*AWSClusterStaticIdentity)(nil), (*v1beta2.AWSClusterStaticIdentity)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1beta1_AWSClusterStaticIdentity_To_v1beta2_AWSClusterStaticIdentity(a.(*AWSClusterStaticIdentity), b.(*v1beta2.AWSClusterStaticIdentity), scope) }); err != nil { @@ -244,11 +239,6 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta2.AWSLoadBalancerSpec)(nil), (*AWSLoadBalancerSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_AWSLoadBalancerSpec_To_v1beta1_AWSLoadBalancerSpec(a.(*v1beta2.AWSLoadBalancerSpec), b.(*AWSLoadBalancerSpec), scope) - }); err != nil { - return err - } if err := s.AddGeneratedConversionFunc((*AWSMachine)(nil), (*v1beta2.AWSMachine)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1beta1_AWSMachine_To_v1beta2_AWSMachine(a.(*AWSMachine), b.(*v1beta2.AWSMachine), scope) }); err != nil { @@ -269,26 +259,11 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*AWSMachineSpec)(nil), (*v1beta2.AWSMachineSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_AWSMachineSpec_To_v1beta2_AWSMachineSpec(a.(*AWSMachineSpec), b.(*v1beta2.AWSMachineSpec), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1beta2.AWSMachineSpec)(nil), (*AWSMachineSpec)(nil), func(a, b 
interface{}, scope conversion.Scope) error { - return Convert_v1beta2_AWSMachineSpec_To_v1beta1_AWSMachineSpec(a.(*v1beta2.AWSMachineSpec), b.(*AWSMachineSpec), scope) - }); err != nil { - return err - } if err := s.AddGeneratedConversionFunc((*AWSMachineStatus)(nil), (*v1beta2.AWSMachineStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1beta1_AWSMachineStatus_To_v1beta2_AWSMachineStatus(a.(*AWSMachineStatus), b.(*v1beta2.AWSMachineStatus), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta2.AWSMachineStatus)(nil), (*AWSMachineStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_AWSMachineStatus_To_v1beta1_AWSMachineStatus(a.(*v1beta2.AWSMachineStatus), b.(*AWSMachineStatus), scope) - }); err != nil { - return err - } if err := s.AddGeneratedConversionFunc((*AWSMachineTemplate)(nil), (*v1beta2.AWSMachineTemplate)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1beta1_AWSMachineTemplate_To_v1beta2_AWSMachineTemplate(a.(*AWSMachineTemplate), b.(*v1beta2.AWSMachineTemplate), scope) }); err != nil { @@ -334,16 +309,6 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta2.AWSMachineTemplateStatus)(nil), (*AWSMachineTemplateStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_AWSMachineTemplateStatus_To_v1beta1_AWSMachineTemplateStatus(a.(*v1beta2.AWSMachineTemplateStatus), b.(*AWSMachineTemplateStatus), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*AWSResourceReference)(nil), (*v1beta2.AWSResourceReference)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_AWSResourceReference_To_v1beta2_AWSResourceReference(a.(*AWSResourceReference), b.(*v1beta2.AWSResourceReference), scope) - }); err != nil { - return err - } if err := s.AddGeneratedConversionFunc((*v1beta2.AWSResourceReference)(nil), (*AWSResourceReference)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1beta2_AWSResourceReference_To_v1beta1_AWSResourceReference(a.(*v1beta2.AWSResourceReference), b.(*AWSResourceReference), scope) }); err != nil { @@ -464,61 +429,31 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta2.IPv6)(nil), (*IPv6)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_IPv6_To_v1beta1_IPv6(a.(*v1beta2.IPv6), b.(*IPv6), scope) - }); err != nil { - return err - } if err := s.AddGeneratedConversionFunc((*Ignition)(nil), (*v1beta2.Ignition)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1beta1_Ignition_To_v1beta2_Ignition(a.(*Ignition), b.(*v1beta2.Ignition), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta2.Ignition)(nil), (*Ignition)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_Ignition_To_v1beta1_Ignition(a.(*v1beta2.Ignition), b.(*Ignition), scope) - }); err != nil { - return err - } if err := s.AddGeneratedConversionFunc((*IngressRule)(nil), (*v1beta2.IngressRule)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1beta1_IngressRule_To_v1beta2_IngressRule(a.(*IngressRule), b.(*v1beta2.IngressRule), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta2.IngressRule)(nil), 
(*IngressRule)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_IngressRule_To_v1beta1_IngressRule(a.(*v1beta2.IngressRule), b.(*IngressRule), scope) - }); err != nil { - return err - } if err := s.AddGeneratedConversionFunc((*Instance)(nil), (*v1beta2.Instance)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1beta1_Instance_To_v1beta2_Instance(a.(*Instance), b.(*v1beta2.Instance), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta2.Instance)(nil), (*Instance)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_Instance_To_v1beta1_Instance(a.(*v1beta2.Instance), b.(*Instance), scope) - }); err != nil { - return err - } if err := s.AddGeneratedConversionFunc((*NetworkSpec)(nil), (*v1beta2.NetworkSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1beta1_NetworkSpec_To_v1beta2_NetworkSpec(a.(*NetworkSpec), b.(*v1beta2.NetworkSpec), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta2.NetworkSpec)(nil), (*NetworkSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_NetworkSpec_To_v1beta1_NetworkSpec(a.(*v1beta2.NetworkSpec), b.(*NetworkSpec), scope) - }); err != nil { - return err - } if err := s.AddGeneratedConversionFunc((*NetworkStatus)(nil), (*v1beta2.NetworkStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1beta1_NetworkStatus_To_v1beta2_NetworkStatus(a.(*NetworkStatus), b.(*v1beta2.NetworkStatus), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta2.NetworkStatus)(nil), (*NetworkStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_NetworkStatus_To_v1beta1_NetworkStatus(a.(*v1beta2.NetworkStatus), b.(*NetworkStatus), scope) - }); err != nil { - return err - } if err := s.AddGeneratedConversionFunc((*RouteTable)(nil), (*v1beta2.RouteTable)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1beta1_RouteTable_To_v1beta2_RouteTable(a.(*RouteTable), b.(*v1beta2.RouteTable), scope) }); err != nil { @@ -534,11 +469,6 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta2.S3Bucket)(nil), (*S3Bucket)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_S3Bucket_To_v1beta1_S3Bucket(a.(*v1beta2.S3Bucket), b.(*S3Bucket), scope) - }); err != nil { - return err - } if err := s.AddGeneratedConversionFunc((*SecurityGroup)(nil), (*v1beta2.SecurityGroup)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1beta1_SecurityGroup_To_v1beta2_SecurityGroup(a.(*SecurityGroup), b.(*v1beta2.SecurityGroup), scope) }); err != nil { @@ -564,21 +494,11 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta2.SubnetSpec)(nil), (*SubnetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_SubnetSpec_To_v1beta1_SubnetSpec(a.(*v1beta2.SubnetSpec), b.(*SubnetSpec), scope) - }); err != nil { - return err - } if err := s.AddGeneratedConversionFunc((*VPCSpec)(nil), (*v1beta2.VPCSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1beta1_VPCSpec_To_v1beta2_VPCSpec(a.(*VPCSpec), b.(*v1beta2.VPCSpec), scope) }); err != nil { return err } - if err := 
s.AddGeneratedConversionFunc((*v1beta2.VPCSpec)(nil), (*VPCSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_VPCSpec_To_v1beta1_VPCSpec(a.(*v1beta2.VPCSpec), b.(*VPCSpec), scope) - }); err != nil { - return err - } if err := s.AddGeneratedConversionFunc((*Volume)(nil), (*v1beta2.Volume)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1beta1_Volume_To_v1beta2_Volume(a.(*Volume), b.(*v1beta2.Volume), scope) }); err != nil { @@ -589,6 +509,96 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } + if err := s.AddConversionFunc((*AWSMachineSpec)(nil), (*v1beta2.AWSMachineSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_AWSMachineSpec_To_v1beta2_AWSMachineSpec(a.(*AWSMachineSpec), b.(*v1beta2.AWSMachineSpec), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*AWSResourceReference)(nil), (*v1beta2.AWSResourceReference)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_AWSResourceReference_To_v1beta2_AWSResourceReference(a.(*AWSResourceReference), b.(*v1beta2.AWSResourceReference), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*ClassicELB)(nil), (*v1beta2.LoadBalancer)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_ClassicELB_To_v1beta2_LoadBalancer(a.(*ClassicELB), b.(*v1beta2.LoadBalancer), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*v1beta2.AWSClusterSpec)(nil), (*AWSClusterSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_AWSClusterSpec_To_v1beta1_AWSClusterSpec(a.(*v1beta2.AWSClusterSpec), b.(*AWSClusterSpec), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*v1beta2.AWSLoadBalancerSpec)(nil), (*AWSLoadBalancerSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_AWSLoadBalancerSpec_To_v1beta1_AWSLoadBalancerSpec(a.(*v1beta2.AWSLoadBalancerSpec), b.(*AWSLoadBalancerSpec), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*v1beta2.AWSMachineSpec)(nil), (*AWSMachineSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_AWSMachineSpec_To_v1beta1_AWSMachineSpec(a.(*v1beta2.AWSMachineSpec), b.(*AWSMachineSpec), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*v1beta2.AWSMachineStatus)(nil), (*AWSMachineStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_AWSMachineStatus_To_v1beta1_AWSMachineStatus(a.(*v1beta2.AWSMachineStatus), b.(*AWSMachineStatus), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*v1beta2.AWSMachineTemplateStatus)(nil), (*AWSMachineTemplateStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_AWSMachineTemplateStatus_To_v1beta1_AWSMachineTemplateStatus(a.(*v1beta2.AWSMachineTemplateStatus), b.(*AWSMachineTemplateStatus), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*v1beta2.IPv6)(nil), (*IPv6)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_IPv6_To_v1beta1_IPv6(a.(*v1beta2.IPv6), b.(*IPv6), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*v1beta2.Ignition)(nil), (*Ignition)(nil), func(a, b interface{}, scope conversion.Scope) error { + return 
Convert_v1beta2_Ignition_To_v1beta1_Ignition(a.(*v1beta2.Ignition), b.(*Ignition), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*v1beta2.IngressRule)(nil), (*IngressRule)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_IngressRule_To_v1beta1_IngressRule(a.(*v1beta2.IngressRule), b.(*IngressRule), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*v1beta2.Instance)(nil), (*Instance)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_Instance_To_v1beta1_Instance(a.(*v1beta2.Instance), b.(*Instance), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*v1beta2.LoadBalancer)(nil), (*ClassicELB)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_LoadBalancer_To_v1beta1_ClassicELB(a.(*v1beta2.LoadBalancer), b.(*ClassicELB), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*v1beta2.NetworkSpec)(nil), (*NetworkSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_NetworkSpec_To_v1beta1_NetworkSpec(a.(*v1beta2.NetworkSpec), b.(*NetworkSpec), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*v1beta2.NetworkStatus)(nil), (*NetworkStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_NetworkStatus_To_v1beta1_NetworkStatus(a.(*v1beta2.NetworkStatus), b.(*NetworkStatus), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*v1beta2.S3Bucket)(nil), (*S3Bucket)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_S3Bucket_To_v1beta1_S3Bucket(a.(*v1beta2.S3Bucket), b.(*S3Bucket), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*v1beta2.SubnetSpec)(nil), (*SubnetSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_SubnetSpec_To_v1beta1_SubnetSpec(a.(*v1beta2.SubnetSpec), b.(*SubnetSpec), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*v1beta2.VPCSpec)(nil), (*VPCSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_VPCSpec_To_v1beta1_VPCSpec(a.(*v1beta2.VPCSpec), b.(*VPCSpec), scope) + }); err != nil { + return err + } return nil } @@ -615,7 +625,6 @@ func Convert_v1beta2_AMIReference_To_v1beta1_AMIReference(in *v1beta2.AMIReferen } func autoConvert_v1beta1_AWSCluster_To_v1beta2_AWSCluster(in *AWSCluster, out *v1beta2.AWSCluster, s conversion.Scope) error { - out.TypeMeta = in.TypeMeta out.ObjectMeta = in.ObjectMeta if err := Convert_v1beta1_AWSClusterSpec_To_v1beta2_AWSClusterSpec(&in.Spec, &out.Spec, s); err != nil { return err @@ -632,7 +641,6 @@ func Convert_v1beta1_AWSCluster_To_v1beta2_AWSCluster(in *AWSCluster, out *v1bet } func autoConvert_v1beta2_AWSCluster_To_v1beta1_AWSCluster(in *v1beta2.AWSCluster, out *AWSCluster, s conversion.Scope) error { - out.TypeMeta = in.TypeMeta out.ObjectMeta = in.ObjectMeta if err := Convert_v1beta2_AWSClusterSpec_To_v1beta1_AWSClusterSpec(&in.Spec, &out.Spec, s); err != nil { return err @@ -649,7 +657,6 @@ func Convert_v1beta2_AWSCluster_To_v1beta1_AWSCluster(in *v1beta2.AWSCluster, ou } func autoConvert_v1beta1_AWSClusterControllerIdentity_To_v1beta2_AWSClusterControllerIdentity(in *AWSClusterControllerIdentity, out *v1beta2.AWSClusterControllerIdentity, s conversion.Scope) error { - out.TypeMeta = in.TypeMeta out.ObjectMeta = in.ObjectMeta if err := 
Convert_v1beta1_AWSClusterControllerIdentitySpec_To_v1beta2_AWSClusterControllerIdentitySpec(&in.Spec, &out.Spec, s); err != nil { return err @@ -663,7 +670,6 @@ func Convert_v1beta1_AWSClusterControllerIdentity_To_v1beta2_AWSClusterControlle } func autoConvert_v1beta2_AWSClusterControllerIdentity_To_v1beta1_AWSClusterControllerIdentity(in *v1beta2.AWSClusterControllerIdentity, out *AWSClusterControllerIdentity, s conversion.Scope) error { - out.TypeMeta = in.TypeMeta out.ObjectMeta = in.ObjectMeta if err := Convert_v1beta2_AWSClusterControllerIdentitySpec_To_v1beta1_AWSClusterControllerIdentitySpec(&in.Spec, &out.Spec, s); err != nil { return err @@ -677,7 +683,6 @@ func Convert_v1beta2_AWSClusterControllerIdentity_To_v1beta1_AWSClusterControlle } func autoConvert_v1beta1_AWSClusterControllerIdentityList_To_v1beta2_AWSClusterControllerIdentityList(in *AWSClusterControllerIdentityList, out *v1beta2.AWSClusterControllerIdentityList, s conversion.Scope) error { - out.TypeMeta = in.TypeMeta out.ListMeta = in.ListMeta out.Items = *(*[]v1beta2.AWSClusterControllerIdentity)(unsafe.Pointer(&in.Items)) return nil @@ -689,7 +694,6 @@ func Convert_v1beta1_AWSClusterControllerIdentityList_To_v1beta2_AWSClusterContr } func autoConvert_v1beta2_AWSClusterControllerIdentityList_To_v1beta1_AWSClusterControllerIdentityList(in *v1beta2.AWSClusterControllerIdentityList, out *AWSClusterControllerIdentityList, s conversion.Scope) error { - out.TypeMeta = in.TypeMeta out.ListMeta = in.ListMeta out.Items = *(*[]AWSClusterControllerIdentity)(unsafe.Pointer(&in.Items)) return nil @@ -745,7 +749,6 @@ func Convert_v1beta2_AWSClusterIdentitySpec_To_v1beta1_AWSClusterIdentitySpec(in } func autoConvert_v1beta1_AWSClusterList_To_v1beta2_AWSClusterList(in *AWSClusterList, out *v1beta2.AWSClusterList, s conversion.Scope) error { - out.TypeMeta = in.TypeMeta out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items @@ -767,7 +770,6 @@ func Convert_v1beta1_AWSClusterList_To_v1beta2_AWSClusterList(in *AWSClusterList } func autoConvert_v1beta2_AWSClusterList_To_v1beta1_AWSClusterList(in *v1beta2.AWSClusterList, out *AWSClusterList, s conversion.Scope) error { - out.TypeMeta = in.TypeMeta out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items @@ -789,7 +791,6 @@ func Convert_v1beta2_AWSClusterList_To_v1beta1_AWSClusterList(in *v1beta2.AWSClu } func autoConvert_v1beta1_AWSClusterRoleIdentity_To_v1beta2_AWSClusterRoleIdentity(in *AWSClusterRoleIdentity, out *v1beta2.AWSClusterRoleIdentity, s conversion.Scope) error { - out.TypeMeta = in.TypeMeta out.ObjectMeta = in.ObjectMeta if err := Convert_v1beta1_AWSClusterRoleIdentitySpec_To_v1beta2_AWSClusterRoleIdentitySpec(&in.Spec, &out.Spec, s); err != nil { return err @@ -803,7 +804,6 @@ func Convert_v1beta1_AWSClusterRoleIdentity_To_v1beta2_AWSClusterRoleIdentity(in } func autoConvert_v1beta2_AWSClusterRoleIdentity_To_v1beta1_AWSClusterRoleIdentity(in *v1beta2.AWSClusterRoleIdentity, out *AWSClusterRoleIdentity, s conversion.Scope) error { - out.TypeMeta = in.TypeMeta out.ObjectMeta = in.ObjectMeta if err := Convert_v1beta2_AWSClusterRoleIdentitySpec_To_v1beta1_AWSClusterRoleIdentitySpec(&in.Spec, &out.Spec, s); err != nil { return err @@ -817,7 +817,6 @@ func Convert_v1beta2_AWSClusterRoleIdentity_To_v1beta1_AWSClusterRoleIdentity(in } func autoConvert_v1beta1_AWSClusterRoleIdentityList_To_v1beta2_AWSClusterRoleIdentityList(in *AWSClusterRoleIdentityList, out *v1beta2.AWSClusterRoleIdentityList, s conversion.Scope) error { - 
out.TypeMeta = in.TypeMeta out.ListMeta = in.ListMeta out.Items = *(*[]v1beta2.AWSClusterRoleIdentity)(unsafe.Pointer(&in.Items)) return nil @@ -829,7 +828,6 @@ func Convert_v1beta1_AWSClusterRoleIdentityList_To_v1beta2_AWSClusterRoleIdentit } func autoConvert_v1beta2_AWSClusterRoleIdentityList_To_v1beta1_AWSClusterRoleIdentityList(in *v1beta2.AWSClusterRoleIdentityList, out *AWSClusterRoleIdentityList, s conversion.Scope) error { - out.TypeMeta = in.TypeMeta out.ListMeta = in.ListMeta out.Items = *(*[]AWSClusterRoleIdentity)(unsafe.Pointer(&in.Items)) return nil @@ -954,7 +952,6 @@ func autoConvert_v1beta2_AWSClusterSpec_To_v1beta1_AWSClusterSpec(in *v1beta2.AW } func autoConvert_v1beta1_AWSClusterStaticIdentity_To_v1beta2_AWSClusterStaticIdentity(in *AWSClusterStaticIdentity, out *v1beta2.AWSClusterStaticIdentity, s conversion.Scope) error { - out.TypeMeta = in.TypeMeta out.ObjectMeta = in.ObjectMeta if err := Convert_v1beta1_AWSClusterStaticIdentitySpec_To_v1beta2_AWSClusterStaticIdentitySpec(&in.Spec, &out.Spec, s); err != nil { return err @@ -968,7 +965,6 @@ func Convert_v1beta1_AWSClusterStaticIdentity_To_v1beta2_AWSClusterStaticIdentit } func autoConvert_v1beta2_AWSClusterStaticIdentity_To_v1beta1_AWSClusterStaticIdentity(in *v1beta2.AWSClusterStaticIdentity, out *AWSClusterStaticIdentity, s conversion.Scope) error { - out.TypeMeta = in.TypeMeta out.ObjectMeta = in.ObjectMeta if err := Convert_v1beta2_AWSClusterStaticIdentitySpec_To_v1beta1_AWSClusterStaticIdentitySpec(&in.Spec, &out.Spec, s); err != nil { return err @@ -982,7 +978,6 @@ func Convert_v1beta2_AWSClusterStaticIdentity_To_v1beta1_AWSClusterStaticIdentit } func autoConvert_v1beta1_AWSClusterStaticIdentityList_To_v1beta2_AWSClusterStaticIdentityList(in *AWSClusterStaticIdentityList, out *v1beta2.AWSClusterStaticIdentityList, s conversion.Scope) error { - out.TypeMeta = in.TypeMeta out.ListMeta = in.ListMeta out.Items = *(*[]v1beta2.AWSClusterStaticIdentity)(unsafe.Pointer(&in.Items)) return nil @@ -994,7 +989,6 @@ func Convert_v1beta1_AWSClusterStaticIdentityList_To_v1beta2_AWSClusterStaticIde } func autoConvert_v1beta2_AWSClusterStaticIdentityList_To_v1beta1_AWSClusterStaticIdentityList(in *v1beta2.AWSClusterStaticIdentityList, out *AWSClusterStaticIdentityList, s conversion.Scope) error { - out.TypeMeta = in.TypeMeta out.ListMeta = in.ListMeta out.Items = *(*[]AWSClusterStaticIdentity)(unsafe.Pointer(&in.Items)) return nil @@ -1036,7 +1030,7 @@ func autoConvert_v1beta1_AWSClusterStatus_To_v1beta2_AWSClusterStatus(in *AWSClu if err := Convert_v1beta1_NetworkStatus_To_v1beta2_NetworkStatus(&in.Network, &out.Network, s); err != nil { return err } - out.FailureDomains = *(*apiv1beta1.FailureDomains)(unsafe.Pointer(&in.FailureDomains)) + out.FailureDomains = *(*corev1beta1.FailureDomains)(unsafe.Pointer(&in.FailureDomains)) if in.Bastion != nil { in, out := &in.Bastion, &out.Bastion *out = new(v1beta2.Instance) @@ -1046,7 +1040,7 @@ func autoConvert_v1beta1_AWSClusterStatus_To_v1beta2_AWSClusterStatus(in *AWSClu } else { out.Bastion = nil } - out.Conditions = *(*apiv1beta1.Conditions)(unsafe.Pointer(&in.Conditions)) + out.Conditions = *(*corev1beta1.Conditions)(unsafe.Pointer(&in.Conditions)) return nil } @@ -1060,7 +1054,7 @@ func autoConvert_v1beta2_AWSClusterStatus_To_v1beta1_AWSClusterStatus(in *v1beta if err := Convert_v1beta2_NetworkStatus_To_v1beta1_NetworkStatus(&in.Network, &out.Network, s); err != nil { return err } - out.FailureDomains = *(*apiv1beta1.FailureDomains)(unsafe.Pointer(&in.FailureDomains)) + 
out.FailureDomains = *(*corev1beta1.FailureDomains)(unsafe.Pointer(&in.FailureDomains)) if in.Bastion != nil { in, out := &in.Bastion, &out.Bastion *out = new(Instance) @@ -1070,7 +1064,7 @@ func autoConvert_v1beta2_AWSClusterStatus_To_v1beta1_AWSClusterStatus(in *v1beta } else { out.Bastion = nil } - out.Conditions = *(*apiv1beta1.Conditions)(unsafe.Pointer(&in.Conditions)) + out.Conditions = *(*corev1beta1.Conditions)(unsafe.Pointer(&in.Conditions)) return nil } @@ -1080,7 +1074,6 @@ func Convert_v1beta2_AWSClusterStatus_To_v1beta1_AWSClusterStatus(in *v1beta2.AW } func autoConvert_v1beta1_AWSClusterTemplate_To_v1beta2_AWSClusterTemplate(in *AWSClusterTemplate, out *v1beta2.AWSClusterTemplate, s conversion.Scope) error { - out.TypeMeta = in.TypeMeta out.ObjectMeta = in.ObjectMeta if err := Convert_v1beta1_AWSClusterTemplateSpec_To_v1beta2_AWSClusterTemplateSpec(&in.Spec, &out.Spec, s); err != nil { return err @@ -1094,7 +1087,6 @@ func Convert_v1beta1_AWSClusterTemplate_To_v1beta2_AWSClusterTemplate(in *AWSClu } func autoConvert_v1beta2_AWSClusterTemplate_To_v1beta1_AWSClusterTemplate(in *v1beta2.AWSClusterTemplate, out *AWSClusterTemplate, s conversion.Scope) error { - out.TypeMeta = in.TypeMeta out.ObjectMeta = in.ObjectMeta if err := Convert_v1beta2_AWSClusterTemplateSpec_To_v1beta1_AWSClusterTemplateSpec(&in.Spec, &out.Spec, s); err != nil { return err @@ -1108,7 +1100,6 @@ func Convert_v1beta2_AWSClusterTemplate_To_v1beta1_AWSClusterTemplate(in *v1beta } func autoConvert_v1beta1_AWSClusterTemplateList_To_v1beta2_AWSClusterTemplateList(in *AWSClusterTemplateList, out *v1beta2.AWSClusterTemplateList, s conversion.Scope) error { - out.TypeMeta = in.TypeMeta out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items @@ -1130,7 +1121,6 @@ func Convert_v1beta1_AWSClusterTemplateList_To_v1beta2_AWSClusterTemplateList(in } func autoConvert_v1beta2_AWSClusterTemplateList_To_v1beta1_AWSClusterTemplateList(in *v1beta2.AWSClusterTemplateList, out *AWSClusterTemplateList, s conversion.Scope) error { - out.TypeMeta = in.TypeMeta out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items @@ -1255,7 +1245,6 @@ func autoConvert_v1beta2_AWSLoadBalancerSpec_To_v1beta1_AWSLoadBalancerSpec(in * } func autoConvert_v1beta1_AWSMachine_To_v1beta2_AWSMachine(in *AWSMachine, out *v1beta2.AWSMachine, s conversion.Scope) error { - out.TypeMeta = in.TypeMeta out.ObjectMeta = in.ObjectMeta if err := Convert_v1beta1_AWSMachineSpec_To_v1beta2_AWSMachineSpec(&in.Spec, &out.Spec, s); err != nil { return err @@ -1272,7 +1261,6 @@ func Convert_v1beta1_AWSMachine_To_v1beta2_AWSMachine(in *AWSMachine, out *v1bet } func autoConvert_v1beta2_AWSMachine_To_v1beta1_AWSMachine(in *v1beta2.AWSMachine, out *AWSMachine, s conversion.Scope) error { - out.TypeMeta = in.TypeMeta out.ObjectMeta = in.ObjectMeta if err := Convert_v1beta2_AWSMachineSpec_To_v1beta1_AWSMachineSpec(&in.Spec, &out.Spec, s); err != nil { return err @@ -1289,7 +1277,6 @@ func Convert_v1beta2_AWSMachine_To_v1beta1_AWSMachine(in *v1beta2.AWSMachine, ou } func autoConvert_v1beta1_AWSMachineList_To_v1beta2_AWSMachineList(in *AWSMachineList, out *v1beta2.AWSMachineList, s conversion.Scope) error { - out.TypeMeta = in.TypeMeta out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items @@ -1311,7 +1298,6 @@ func Convert_v1beta1_AWSMachineList_To_v1beta2_AWSMachineList(in *AWSMachineList } func autoConvert_v1beta2_AWSMachineList_To_v1beta1_AWSMachineList(in *v1beta2.AWSMachineList, out *AWSMachineList, 
s conversion.Scope) error { - out.TypeMeta = in.TypeMeta out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items @@ -1452,6 +1438,7 @@ func autoConvert_v1beta2_AWSMachineSpec_To_v1beta1_AWSMachineSpec(in *v1beta2.AW // WARNING: in.MarketType requires manual conversion: does not exist in peer-type // WARNING: in.HostID requires manual conversion: does not exist in peer-type // WARNING: in.HostAffinity requires manual conversion: does not exist in peer-type + // WARNING: in.DynamicHostAllocation requires manual conversion: does not exist in peer-type // WARNING: in.CapacityReservationPreference requires manual conversion: does not exist in peer-type return nil } @@ -1459,11 +1446,11 @@ func autoConvert_v1beta2_AWSMachineSpec_To_v1beta1_AWSMachineSpec(in *v1beta2.AW func autoConvert_v1beta1_AWSMachineStatus_To_v1beta2_AWSMachineStatus(in *AWSMachineStatus, out *v1beta2.AWSMachineStatus, s conversion.Scope) error { out.Ready = in.Ready out.Interruptible = in.Interruptible - out.Addresses = *(*[]apiv1beta1.MachineAddress)(unsafe.Pointer(&in.Addresses)) + out.Addresses = *(*[]corev1beta1.MachineAddress)(unsafe.Pointer(&in.Addresses)) out.InstanceState = (*v1beta2.InstanceState)(unsafe.Pointer(in.InstanceState)) out.FailureReason = (*string)(unsafe.Pointer(in.FailureReason)) out.FailureMessage = (*string)(unsafe.Pointer(in.FailureMessage)) - out.Conditions = *(*apiv1beta1.Conditions)(unsafe.Pointer(&in.Conditions)) + out.Conditions = *(*corev1beta1.Conditions)(unsafe.Pointer(&in.Conditions)) return nil } @@ -1475,21 +1462,16 @@ func Convert_v1beta1_AWSMachineStatus_To_v1beta2_AWSMachineStatus(in *AWSMachine func autoConvert_v1beta2_AWSMachineStatus_To_v1beta1_AWSMachineStatus(in *v1beta2.AWSMachineStatus, out *AWSMachineStatus, s conversion.Scope) error { out.Ready = in.Ready out.Interruptible = in.Interruptible - out.Addresses = *(*[]apiv1beta1.MachineAddress)(unsafe.Pointer(&in.Addresses)) + out.Addresses = *(*[]corev1beta1.MachineAddress)(unsafe.Pointer(&in.Addresses)) out.InstanceState = (*InstanceState)(unsafe.Pointer(in.InstanceState)) out.FailureReason = (*string)(unsafe.Pointer(in.FailureReason)) out.FailureMessage = (*string)(unsafe.Pointer(in.FailureMessage)) - out.Conditions = *(*apiv1beta1.Conditions)(unsafe.Pointer(&in.Conditions)) + out.Conditions = *(*corev1beta1.Conditions)(unsafe.Pointer(&in.Conditions)) + // WARNING: in.DedicatedHost requires manual conversion: does not exist in peer-type return nil } -// Convert_v1beta2_AWSMachineStatus_To_v1beta1_AWSMachineStatus is an autogenerated conversion function. 
-func Convert_v1beta2_AWSMachineStatus_To_v1beta1_AWSMachineStatus(in *v1beta2.AWSMachineStatus, out *AWSMachineStatus, s conversion.Scope) error { - return autoConvert_v1beta2_AWSMachineStatus_To_v1beta1_AWSMachineStatus(in, out, s) -} - func autoConvert_v1beta1_AWSMachineTemplate_To_v1beta2_AWSMachineTemplate(in *AWSMachineTemplate, out *v1beta2.AWSMachineTemplate, s conversion.Scope) error { - out.TypeMeta = in.TypeMeta out.ObjectMeta = in.ObjectMeta if err := Convert_v1beta1_AWSMachineTemplateSpec_To_v1beta2_AWSMachineTemplateSpec(&in.Spec, &out.Spec, s); err != nil { return err @@ -1506,7 +1488,6 @@ func Convert_v1beta1_AWSMachineTemplate_To_v1beta2_AWSMachineTemplate(in *AWSMac } func autoConvert_v1beta2_AWSMachineTemplate_To_v1beta1_AWSMachineTemplate(in *v1beta2.AWSMachineTemplate, out *AWSMachineTemplate, s conversion.Scope) error { - out.TypeMeta = in.TypeMeta out.ObjectMeta = in.ObjectMeta if err := Convert_v1beta2_AWSMachineTemplateSpec_To_v1beta1_AWSMachineTemplateSpec(&in.Spec, &out.Spec, s); err != nil { return err @@ -1523,7 +1504,6 @@ func Convert_v1beta2_AWSMachineTemplate_To_v1beta1_AWSMachineTemplate(in *v1beta } func autoConvert_v1beta1_AWSMachineTemplateList_To_v1beta2_AWSMachineTemplateList(in *AWSMachineTemplateList, out *v1beta2.AWSMachineTemplateList, s conversion.Scope) error { - out.TypeMeta = in.TypeMeta out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items @@ -1545,7 +1525,6 @@ func Convert_v1beta1_AWSMachineTemplateList_To_v1beta2_AWSMachineTemplateList(in } func autoConvert_v1beta2_AWSMachineTemplateList_To_v1beta1_AWSMachineTemplateList(in *v1beta2.AWSMachineTemplateList, out *AWSMachineTemplateList, s conversion.Scope) error { - out.TypeMeta = in.TypeMeta out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items @@ -1628,14 +1607,11 @@ func Convert_v1beta1_AWSMachineTemplateStatus_To_v1beta2_AWSMachineTemplateStatu func autoConvert_v1beta2_AWSMachineTemplateStatus_To_v1beta1_AWSMachineTemplateStatus(in *v1beta2.AWSMachineTemplateStatus, out *AWSMachineTemplateStatus, s conversion.Scope) error { out.Capacity = *(*v1.ResourceList)(unsafe.Pointer(&in.Capacity)) + // WARNING: in.NodeInfo requires manual conversion: does not exist in peer-type + // WARNING: in.Conditions requires manual conversion: does not exist in peer-type return nil } -// Convert_v1beta2_AWSMachineTemplateStatus_To_v1beta1_AWSMachineTemplateStatus is an autogenerated conversion function. 
-func Convert_v1beta2_AWSMachineTemplateStatus_To_v1beta1_AWSMachineTemplateStatus(in *v1beta2.AWSMachineTemplateStatus, out *AWSMachineTemplateStatus, s conversion.Scope) error { - return autoConvert_v1beta2_AWSMachineTemplateStatus_To_v1beta1_AWSMachineTemplateStatus(in, out, s) -} - func autoConvert_v1beta1_AWSResourceReference_To_v1beta2_AWSResourceReference(in *AWSResourceReference, out *v1beta2.AWSResourceReference, s conversion.Scope) error { out.ID = (*string)(unsafe.Pointer(in.ID)) // WARNING: in.ARN requires manual conversion: does not exist in peer-type @@ -2009,7 +1985,7 @@ func autoConvert_v1beta1_Instance_To_v1beta2_Instance(in *Instance, out *v1beta2 out.SecurityGroupIDs = *(*[]string)(unsafe.Pointer(&in.SecurityGroupIDs)) out.UserData = (*string)(unsafe.Pointer(in.UserData)) out.IAMProfile = in.IAMProfile - out.Addresses = *(*[]apiv1beta1.MachineAddress)(unsafe.Pointer(&in.Addresses)) + out.Addresses = *(*[]corev1beta1.MachineAddress)(unsafe.Pointer(&in.Addresses)) out.PrivateIP = (*string)(unsafe.Pointer(in.PrivateIP)) out.PublicIP = (*string)(unsafe.Pointer(in.PublicIP)) out.ENASupport = (*bool)(unsafe.Pointer(in.ENASupport)) @@ -2040,7 +2016,7 @@ func autoConvert_v1beta2_Instance_To_v1beta1_Instance(in *v1beta2.Instance, out out.SecurityGroupIDs = *(*[]string)(unsafe.Pointer(&in.SecurityGroupIDs)) out.UserData = (*string)(unsafe.Pointer(in.UserData)) out.IAMProfile = in.IAMProfile - out.Addresses = *(*[]apiv1beta1.MachineAddress)(unsafe.Pointer(&in.Addresses)) + out.Addresses = *(*[]corev1beta1.MachineAddress)(unsafe.Pointer(&in.Addresses)) out.PrivateIP = (*string)(unsafe.Pointer(in.PrivateIP)) out.PublicIP = (*string)(unsafe.Pointer(in.PublicIP)) out.ENASupport = (*bool)(unsafe.Pointer(in.ENASupport)) @@ -2063,6 +2039,7 @@ func autoConvert_v1beta2_Instance_To_v1beta1_Instance(in *v1beta2.Instance, out // WARNING: in.MarketType requires manual conversion: does not exist in peer-type // WARNING: in.HostAffinity requires manual conversion: does not exist in peer-type // WARNING: in.HostID requires manual conversion: does not exist in peer-type + // WARNING: in.DynamicHostAllocation requires manual conversion: does not exist in peer-type // WARNING: in.CapacityReservationPreference requires manual conversion: does not exist in peer-type // WARNING: in.CPUOptions requires manual conversion: does not exist in peer-type return nil diff --git a/api/v1beta1/zz_generated.deepcopy.go b/api/v1beta1/zz_generated.deepcopy.go index b3f9c154cf..28f0fe8907 100644 --- a/api/v1beta1/zz_generated.deepcopy.go +++ b/api/v1beta1/zz_generated.deepcopy.go @@ -23,7 +23,7 @@ package v1beta1 import ( "k8s.io/api/core/v1" runtime "k8s.io/apimachinery/pkg/runtime" - apiv1beta1 "sigs.k8s.io/cluster-api/api/v1beta1" + corev1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
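A note on the pattern running through the generated hunks above: conversion-gen emits zero-copy conversions for types whose old and new versions are memory-layout-identical, reinterpreting slices and maps through `unsafe.Pointer` instead of copying element by element, which is why most of these hunks only need the import alias swapped from `apiv1beta1` to `corev1beta1`. A minimal sketch of the idiom, with `Old` and `New` as hypothetical layout-identical stand-ins:

```go
package main

import (
	"fmt"
	"unsafe"
)

// Old and New stand in for two API versions whose structs have identical
// field order and types, and therefore identical memory layout.
type Old struct {
	Host string
	Port int32
}

type New struct {
	Host string
	Port int32
}

func main() {
	in := []Old{{Host: "example.com", Port: 6443}}
	// Reinterpret the backing array in place: no allocation, no per-element
	// copy. This is only safe while Old and New share the same layout.
	out := *(*[]New)(unsafe.Pointer(&in))
	fmt.Println(out[0].Host, out[0].Port) // example.com 6443
}
```

The cast copies nothing; both slices share one backing array, which is safe only while the two element types really do have identical layouts.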
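Where a generated `Convert_*` wrapper is deleted outright, as happens above for `Convert_v1beta2_AWSMachineStatus_To_v1beta1_AWSMachineStatus` once `DedicatedHost` gains a `requires manual conversion` warning, conversion-gen expects a hand-written function of the same name to take over. A sketch of the shape such a function typically takes; the dropped-field handling here is illustrative, not the implementation in this change:

```go
// Illustrative only: a hand-written conversion delegates to the generated
// autoConvert for the fields that still line up, then handles the ones
// conversion-gen flagged as having no peer type.
func Convert_v1beta2_AWSMachineStatus_To_v1beta1_AWSMachineStatus(in *v1beta2.AWSMachineStatus, out *AWSMachineStatus, s conversion.Scope) error {
	if err := autoConvert_v1beta2_AWSMachineStatus_To_v1beta1_AWSMachineStatus(in, out, s); err != nil {
		return err
	}
	// in.DedicatedHost has no v1beta1 peer; the real conversion decides
	// whether to preserve it (for example via annotations) or drop it.
	return nil
}
```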
@@ -409,7 +409,7 @@ func (in *AWSClusterStatus) DeepCopyInto(out *AWSClusterStatus) { in.Network.DeepCopyInto(&out.Network) if in.FailureDomains != nil { in, out := &in.FailureDomains, &out.FailureDomains - *out = make(apiv1beta1.FailureDomains, len(*in)) + *out = make(corev1beta1.FailureDomains, len(*in)) for key, val := range *in { (*out)[key] = *val.DeepCopy() } @@ -421,7 +421,7 @@ func (in *AWSClusterStatus) DeepCopyInto(out *AWSClusterStatus) { } if in.Conditions != nil { in, out := &in.Conditions, &out.Conditions - *out = make(apiv1beta1.Conditions, len(*in)) + *out = make(corev1beta1.Conditions, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -741,7 +741,7 @@ func (in *AWSMachineStatus) DeepCopyInto(out *AWSMachineStatus) { *out = *in if in.Addresses != nil { in, out := &in.Addresses, &out.Addresses - *out = make([]apiv1beta1.MachineAddress, len(*in)) + *out = make([]corev1beta1.MachineAddress, len(*in)) copy(*out, *in) } if in.InstanceState != nil { @@ -761,7 +761,7 @@ func (in *AWSMachineStatus) DeepCopyInto(out *AWSMachineStatus) { } if in.Conditions != nil { in, out := &in.Conditions, &out.Conditions - *out = make(apiv1beta1.Conditions, len(*in)) + *out = make(corev1beta1.Conditions, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -1305,7 +1305,7 @@ func (in *Instance) DeepCopyInto(out *Instance) { } if in.Addresses != nil { in, out := &in.Addresses, &out.Addresses - *out = make([]apiv1beta1.MachineAddress, len(*in)) + *out = make([]corev1beta1.MachineAddress, len(*in)) copy(*out, *in) } if in.PrivateIP != nil { diff --git a/api/v1beta2/awscluster_types.go b/api/v1beta2/awscluster_types.go index 213ad99c56..184ef9de43 100644 --- a/api/v1beta2/awscluster_types.go +++ b/api/v1beta2/awscluster_types.go @@ -19,7 +19,7 @@ package v1beta2 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) const ( @@ -49,7 +49,7 @@ type AWSClusterSpec struct { // ControlPlaneEndpoint represents the endpoint used to communicate with the control plane. // +optional - ControlPlaneEndpoint clusterv1.APIEndpoint `json:"controlPlaneEndpoint"` + ControlPlaneEndpoint clusterv1beta1.APIEndpoint `json:"controlPlaneEndpoint"` // AdditionalTags is an optional set of tags to add to AWS resources managed by the AWS provider, in addition to the // ones added by default. @@ -276,11 +276,11 @@ type AdditionalListenerSpec struct { // AWSClusterStatus defines the observed state of AWSCluster. type AWSClusterStatus struct { // +kubebuilder:default=false - Ready bool `json:"ready"` - Network NetworkStatus `json:"networkStatus,omitempty"` - FailureDomains clusterv1.FailureDomains `json:"failureDomains,omitempty"` - Bastion *Instance `json:"bastion,omitempty"` - Conditions clusterv1.Conditions `json:"conditions,omitempty"` + Ready bool `json:"ready"` + Network NetworkStatus `json:"networkStatus,omitempty"` + FailureDomains clusterv1beta1.FailureDomains `json:"failureDomains,omitempty"` + Bastion *Instance `json:"bastion,omitempty"` + Conditions clusterv1beta1.Conditions `json:"conditions,omitempty"` } // S3Bucket defines a supporting S3 bucket for the cluster, currently can be optionally used for Ignition. @@ -346,12 +346,12 @@ type AWSClusterList struct { } // GetConditions returns the observations of the operational state of the AWSCluster resource. 
-func (r *AWSCluster) GetConditions() clusterv1.Conditions { +func (r *AWSCluster) GetConditions() clusterv1beta1.Conditions { return r.Status.Conditions } -// SetConditions sets the underlying service state of the AWSCluster to the predescribed clusterv1.Conditions. -func (r *AWSCluster) SetConditions(conditions clusterv1.Conditions) { +// SetConditions sets the underlying service state of the AWSCluster to the predescribed clusterv1beta1.Conditions. +func (r *AWSCluster) SetConditions(conditions clusterv1beta1.Conditions) { r.Status.Conditions = conditions } diff --git a/api/v1beta2/awscluster_webhook.go b/api/v1beta2/awscluster_webhook.go index ec4fac40af..ddef7446bf 100644 --- a/api/v1beta2/awscluster_webhook.go +++ b/api/v1beta2/awscluster_webhook.go @@ -30,7 +30,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/webhook" "sigs.k8s.io/controller-runtime/pkg/webhook/admission" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" "sigs.k8s.io/cluster-api/util/annotations" ) @@ -130,7 +130,7 @@ func (*awsClusterWebhook) ValidateUpdate(_ context.Context, oldObj, newObj runti allErrs = append(allErrs, r.validateControlPlaneLoadBalancerUpdate(oldLB, newLB)...) } - if !cmp.Equal(oldC.Spec.ControlPlaneEndpoint, clusterv1.APIEndpoint{}) && + if !cmp.Equal(oldC.Spec.ControlPlaneEndpoint, clusterv1beta1.APIEndpoint{}) && !cmp.Equal(r.Spec.ControlPlaneEndpoint, oldC.Spec.ControlPlaneEndpoint) { allErrs = append(allErrs, field.Invalid(field.NewPath("spec", "controlPlaneEndpoint"), r.Spec.ControlPlaneEndpoint, "field is immutable"), diff --git a/api/v1beta2/awscluster_webhook_test.go b/api/v1beta2/awscluster_webhook_test.go index ad1b22d5fb..558277a55e 100644 --- a/api/v1beta2/awscluster_webhook_test.go +++ b/api/v1beta2/awscluster_webhook_test.go @@ -33,7 +33,7 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/feature" "sigs.k8s.io/cluster-api-provider-aws/v2/util/defaulting" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) func TestAWSClusterDefault(t *testing.T) { @@ -945,7 +945,7 @@ func TestAWSClusterValidateUpdate(t *testing.T) { name: "controlPlaneEndpoint is immutable", oldCluster: &AWSCluster{ Spec: AWSClusterSpec{ - ControlPlaneEndpoint: clusterv1.APIEndpoint{ + ControlPlaneEndpoint: clusterv1beta1.APIEndpoint{ Host: "example.com", Port: int32(8000), }, @@ -953,7 +953,7 @@ func TestAWSClusterValidateUpdate(t *testing.T) { }, newCluster: &AWSCluster{ Spec: AWSClusterSpec{ - ControlPlaneEndpoint: clusterv1.APIEndpoint{ + ControlPlaneEndpoint: clusterv1beta1.APIEndpoint{ Host: "foo.example.com", Port: int32(9000), }, @@ -965,12 +965,12 @@ func TestAWSClusterValidateUpdate(t *testing.T) { name: "controlPlaneEndpoint can be updated if it is empty", oldCluster: &AWSCluster{ Spec: AWSClusterSpec{ - ControlPlaneEndpoint: clusterv1.APIEndpoint{}, + ControlPlaneEndpoint: clusterv1beta1.APIEndpoint{}, }, }, newCluster: &AWSCluster{ Spec: AWSClusterSpec{ - ControlPlaneEndpoint: clusterv1.APIEndpoint{ + ControlPlaneEndpoint: clusterv1beta1.APIEndpoint{ Host: "example.com", Port: int32(8000), }, @@ -982,7 +982,7 @@ func TestAWSClusterValidateUpdate(t *testing.T) { name: "removal of externally managed annotation is not allowed", oldCluster: &AWSCluster{ ObjectMeta: metav1.ObjectMeta{ - Annotations: map[string]string{clusterv1.ManagedByAnnotation: ""}, + Annotations: map[string]string{clusterv1beta1.ManagedByAnnotation: ""}, }, }, newCluster: &AWSCluster{}, @@ -993,7 +993,7 @@ func 
TestAWSClusterValidateUpdate(t *testing.T) { oldCluster: &AWSCluster{}, newCluster: &AWSCluster{ ObjectMeta: metav1.ObjectMeta{ - Annotations: map[string]string{clusterv1.ManagedByAnnotation: ""}, + Annotations: map[string]string{clusterv1beta1.ManagedByAnnotation: ""}, }, }, wantErr: false, diff --git a/api/v1beta2/awsclustertemplate_types.go b/api/v1beta2/awsclustertemplate_types.go index e0a827fa3d..2f81400eef 100644 --- a/api/v1beta2/awsclustertemplate_types.go +++ b/api/v1beta2/awsclustertemplate_types.go @@ -19,7 +19,7 @@ package v1beta2 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) // AWSClusterTemplateSpec defines the desired state of AWSClusterTemplate. @@ -59,6 +59,6 @@ type AWSClusterTemplateResource struct { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - ObjectMeta clusterv1.ObjectMeta `json:"metadata,omitempty"` - Spec AWSClusterSpec `json:"spec"` + ObjectMeta clusterv1beta1.ObjectMeta `json:"metadata,omitempty"` + Spec AWSClusterSpec `json:"spec"` } diff --git a/api/v1beta2/awsmachine_types.go b/api/v1beta2/awsmachine_types.go index 7031bdbaae..2ff52d71e0 100644 --- a/api/v1beta2/awsmachine_types.go +++ b/api/v1beta2/awsmachine_types.go @@ -19,7 +19,7 @@ package v1beta2 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) const ( @@ -218,6 +218,10 @@ type AWSMachineSpec struct { PlacementGroupPartition int64 `json:"placementGroupPartition,omitempty"` // Tenancy indicates if instance should run on shared or single-tenant hardware. + // When Tenancy=host, AWS will attempt to find a suitable host from: + // - Preexisting allocated hosts that have auto-placement enabled + // - A specific host ID, if configured + // - Allocating a new dedicated host if DynamicHostAllocation is configured // +optional // +kubebuilder:validation:Enum:=default;dedicated;host Tenancy string `json:"tenancy,omitempty"` @@ -240,17 +244,28 @@ type AWSMachineSpec struct { MarketType MarketType `json:"marketType,omitempty"` // HostID specifies the Dedicated Host on which the instance must be started. + // This field is mutually exclusive with DynamicHostAllocation. + // +kubebuilder:validation:Pattern=`^h-[0-9a-f]{17}$` + // +kubebuilder:validation:MaxLength=19 // +optional HostID *string `json:"hostID,omitempty"` // HostAffinity specifies the dedicated host affinity setting for the instance. - // When hostAffinity is set to host, an instance started onto a specific host always restarts on the same host if stopped. - // When hostAffinity is set to default, and you stop and restart the instance, it can be restarted on any available host. + // When HostAffinity is set to host, an instance started onto a specific host always restarts on the same host if stopped. + // When HostAffinity is set to default, and you stop and restart the instance, it can be restarted on any available host. // When HostAffinity is defined, HostID is required. // +optional // +kubebuilder:validation:Enum:=default;host + // +kubebuilder:default=host HostAffinity *string `json:"hostAffinity,omitempty"` + // DynamicHostAllocation enables automatic allocation of a single dedicated host. + // This field is mutually exclusive with HostID and always allocates exactly one host. 
+	// Cost effectiveness of allocating a single instance on a dedicated host may vary
+	// depending on the instance type and the region.
+	// +optional
+	DynamicHostAllocation *DynamicHostAllocationSpec `json:"dynamicHostAllocation,omitempty"`
+
 	// CapacityReservationPreference specifies the preference for use of Capacity Reservations by the instance. Valid values include:
 	// "Open": The instance may make use of open Capacity Reservations that match its AZ and InstanceType
 	// "None": The instance may not make use of any Capacity Reservations. This is to conserve open reservations for desired workloads
@@ -260,6 +275,14 @@ type AWSMachineSpec struct {
 	CapacityReservationPreference CapacityReservationPreference `json:"capacityReservationPreference,omitempty"`
 }
 
+// DynamicHostAllocationSpec defines the configuration for dynamic dedicated host allocation.
+// This specification always allocates exactly one dedicated host per machine.
+type DynamicHostAllocationSpec struct {
+	// Tags to apply to the allocated dedicated host.
+	// +optional
+	Tags map[string]string `json:"tags,omitempty"`
+}
+
 // CloudInit defines options related to the bootstrapping systems where
 // CloudInit is used.
 type CloudInit struct {
@@ -391,7 +414,7 @@ type AWSMachineStatus struct {
 	Interruptible bool `json:"interruptible,omitempty"`
 
 	// Addresses contains the AWS instance associated addresses.
-	Addresses []clusterv1.MachineAddress `json:"addresses,omitempty"`
+	Addresses []clusterv1beta1.MachineAddress `json:"addresses,omitempty"`
 
 	// InstanceState is the state of the AWS instance for this machine.
 	// +optional
@@ -437,7 +460,21 @@ type AWSMachineStatus struct {
 
 	// Conditions defines current service state of the AWSMachine.
 	// +optional
-	Conditions clusterv1.Conditions `json:"conditions,omitempty"`
+	Conditions clusterv1beta1.Conditions `json:"conditions,omitempty"`
+
+	// DedicatedHost tracks the dynamically allocated dedicated host.
+	// This field is populated when DynamicHostAllocation is used.
+	// +optional
+	DedicatedHost *DedicatedHostStatus `json:"dedicatedHost,omitempty"`
+}
+
+// DedicatedHostStatus defines the observed state of a dynamically allocated dedicated host
+// associated with an AWSMachine. This struct is used to track the ID of the dedicated host.
+type DedicatedHostStatus struct {
+	// ID tracks the dynamically allocated dedicated host ID.
+	// This field is populated when DynamicHostAllocation is used.
+	// +optional
+	ID *string `json:"id,omitempty"`
 }
 
 // +kubebuilder:object:root=true
@@ -461,12 +498,12 @@ type AWSMachine struct {
 }
 
 // GetConditions returns the observations of the operational state of the AWSMachine resource.
-func (r *AWSMachine) GetConditions() clusterv1.Conditions {
+func (r *AWSMachine) GetConditions() clusterv1beta1.Conditions {
 	return r.Status.Conditions
 }
 
-// SetConditions sets the underlying service state of the AWSMachine to the predescribed clusterv1.Conditions.
-func (r *AWSMachine) SetConditions(conditions clusterv1.Conditions) {
+// SetConditions sets the underlying service state of the AWSMachine to the predescribed clusterv1beta1.Conditions.
+func (r *AWSMachine) SetConditions(conditions clusterv1beta1.Conditions) { r.Status.Conditions = conditions } diff --git a/api/v1beta2/awsmachine_webhook.go b/api/v1beta2/awsmachine_webhook.go index 3cb24f5cbe..9c271c6939 100644 --- a/api/v1beta2/awsmachine_webhook.go +++ b/api/v1beta2/awsmachine_webhook.go @@ -75,11 +75,11 @@ func (*awsMachineWebhook) ValidateCreate(_ context.Context, obj runtime.Object) allErrs = append(allErrs, r.validateNonRootVolumes()...) allErrs = append(allErrs, r.validateSSHKeyName()...) allErrs = append(allErrs, r.validateAdditionalSecurityGroups()...) - allErrs = append(allErrs, r.validateHostAffinity()...) allErrs = append(allErrs, r.Spec.AdditionalTags.Validate()...) allErrs = append(allErrs, r.validateNetworkElasticIPPool()...) allErrs = append(allErrs, r.validateInstanceMarketType()...) allErrs = append(allErrs, r.validateCapacityReservation()...) + allErrs = append(allErrs, r.validateHostAllocation()...) return nil, aggregateObjErrors(r.GroupVersionKind().GroupKind(), r.Name, allErrs) } @@ -109,7 +109,7 @@ func (*awsMachineWebhook) ValidateUpdate(ctx context.Context, oldObj, newObj run allErrs = append(allErrs, r.validateCloudInitSecret()...) allErrs = append(allErrs, r.validateAdditionalSecurityGroups()...) allErrs = append(allErrs, r.Spec.AdditionalTags.Validate()...) - allErrs = append(allErrs, r.validateHostAffinity()...) + allErrs = append(allErrs, r.validateHostAllocation()...) newAWSMachineSpec := newAWSMachine["spec"].(map[string]interface{}) oldAWSMachineSpec := oldAWSMachine["spec"].(map[string]interface{}) @@ -474,14 +474,17 @@ func (r *AWSMachine) validateAdditionalSecurityGroups() field.ErrorList { return allErrs } -func (r *AWSMachine) validateHostAffinity() field.ErrorList { +func (r *AWSMachine) validateHostAllocation() field.ErrorList { var allErrs field.ErrorList - if r.Spec.HostAffinity != nil { - if r.Spec.HostID == nil || len(*r.Spec.HostID) == 0 { - allErrs = append(allErrs, field.Required(field.NewPath("spec.hostID"), "hostID must be set when hostAffinity is configured")) - } + // Check if both hostID and dynamicHostAllocation are specified + hasHostID := r.Spec.HostID != nil && len(*r.Spec.HostID) > 0 + hasDynamicHostAllocation := r.Spec.DynamicHostAllocation != nil + + if hasHostID && hasDynamicHostAllocation { + allErrs = append(allErrs, field.Forbidden(field.NewPath("spec.hostID"), "hostID and dynamicHostAllocation are mutually exclusive"), field.Forbidden(field.NewPath("spec.dynamicHostAllocation"), "hostID and dynamicHostAllocation are mutually exclusive")) } + return allErrs } diff --git a/api/v1beta2/awsmachine_webhook_test.go b/api/v1beta2/awsmachine_webhook_test.go index 66c0919ecb..d233cde8d5 100644 --- a/api/v1beta2/awsmachine_webhook_test.go +++ b/api/v1beta2/awsmachine_webhook_test.go @@ -489,16 +489,6 @@ func TestAWSMachineCreate(t *testing.T) { }, wantErr: true, }, - { - name: "configure host affinity without Host ID", - machine: &AWSMachine{ - Spec: AWSMachineSpec{ - InstanceType: "test", - HostAffinity: ptr.To("default"), - }, - }, - wantErr: true, - }, { name: "create with valid BYOIPv4", machine: &AWSMachine{ @@ -567,6 +557,45 @@ func TestAWSMachineCreate(t *testing.T) { }, wantErr: true, }, + { + name: "hostID and dynamicHostAllocation are mutually exclusive", + machine: &AWSMachine{ + Spec: AWSMachineSpec{ + InstanceType: "test", + HostID: aws.String("h-1234567890abcdef0"), + DynamicHostAllocation: &DynamicHostAllocationSpec{ + Tags: map[string]string{ + "Environment": "test", + }, + }, + }, + }, + wantErr: 
true, + }, + { + name: "hostID alone is valid", + machine: &AWSMachine{ + Spec: AWSMachineSpec{ + InstanceType: "test", + HostID: aws.String("h-1234567890abcdef0"), + }, + }, + wantErr: false, + }, + { + name: "dynamicHostAllocation alone is valid", + machine: &AWSMachine{ + Spec: AWSMachineSpec{ + InstanceType: "test", + DynamicHostAllocation: &DynamicHostAllocationSpec{ + Tags: map[string]string{ + "Environment": "test", + }, + }, + }, + }, + wantErr: false, + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { diff --git a/api/v1beta2/awsmachinetemplate_types.go b/api/v1beta2/awsmachinetemplate_types.go index 50d8dda22d..12829090f2 100644 --- a/api/v1beta2/awsmachinetemplate_types.go +++ b/api/v1beta2/awsmachinetemplate_types.go @@ -20,9 +20,45 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) +// Architecture represents the CPU architecture of the node. +// Its underlying type is a string and its value can be any of amd64, arm64. +type Architecture string + +// Architecture constants. +const ( + ArchitectureAmd64 Architecture = "amd64" + ArchitectureArm64 Architecture = "arm64" +) + +// OperatingSystem represents the operating system of the node. +// Its underlying type is a string and its value can be any of linux, windows. +type OperatingSystem string + +// Operating system constants. +const ( + // OperatingSystemLinux represents the Linux operating system. + OperatingSystemLinux OperatingSystem = "linux" + // OperatingSystemWindows represents the Windows operating system. + OperatingSystemWindows OperatingSystem = "windows" +) + +// NodeInfo contains information about the node's architecture and operating system. +type NodeInfo struct { + // Architecture is the CPU architecture of the node. + // Its underlying type is a string and its value can be any of amd64, arm64. + // +kubebuilder:validation:Enum=amd64;arm64 + // +optional + Architecture Architecture `json:"architecture,omitempty"` + // OperatingSystem is the operating system of the node. + // Its underlying type is a string and its value can be any of linux, windows. + // +kubebuilder:validation:Enum=linux;windows + // +optional + OperatingSystem OperatingSystem `json:"operatingSystem,omitempty"` +} + // AWSMachineTemplateStatus defines a status for an AWSMachineTemplate. type AWSMachineTemplateStatus struct { // Capacity defines the resource capacity for this machine. @@ -30,6 +66,16 @@ type AWSMachineTemplateStatus struct { // https://github.com/kubernetes-sigs/cluster-api/blob/main/docs/proposals/20210310-opt-in-autoscaling-from-zero.md // +optional Capacity corev1.ResourceList `json:"capacity,omitempty"` + + // NodeInfo contains information about the node's architecture and operating system. + // This value is used for autoscaling from zero operations as defined in: + // https://github.com/kubernetes-sigs/cluster-api/blob/main/docs/proposals/20210310-opt-in-autoscaling-from-zero.md + // +optional + NodeInfo *NodeInfo `json:"nodeInfo,omitempty"` + + // Conditions defines current service state of the AWSMachineTemplate. + // +optional + Conditions clusterv1beta1.Conditions `json:"conditions,omitempty"` } // AWSMachineTemplateSpec defines the desired state of AWSMachineTemplate. 
@@ -40,6 +86,7 @@ type AWSMachineTemplateSpec struct { // +kubebuilder:object:root=true // +kubebuilder:resource:path=awsmachinetemplates,scope=Namespaced,categories=cluster-api,shortName=awsmt // +kubebuilder:storageversion +// +kubebuilder:subresource:status // +k8s:defaulter-gen=true // AWSMachineTemplate is the schema for the Amazon EC2 Machine Templates API. @@ -65,12 +112,22 @@ type AWSMachineTemplateResource struct { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata // +optional - ObjectMeta clusterv1.ObjectMeta `json:"metadata,omitempty"` + ObjectMeta clusterv1beta1.ObjectMeta `json:"metadata,omitempty"` // Spec is the specification of the desired behavior of the machine. Spec AWSMachineSpec `json:"spec"` } +// GetConditions returns the observations of the operational state of the AWSMachineTemplate resource. +func (r *AWSMachineTemplate) GetConditions() clusterv1beta1.Conditions { + return r.Status.Conditions +} + +// SetConditions sets the underlying service state of the AWSMachineTemplate to the predescribed clusterv1beta1.Conditions. +func (r *AWSMachineTemplate) SetConditions(conditions clusterv1beta1.Conditions) { + r.Status.Conditions = conditions +} + func init() { SchemeBuilder.Register(&AWSMachineTemplate{}, &AWSMachineTemplateList{}) } diff --git a/api/v1beta2/awsmachinetemplate_webhook.go b/api/v1beta2/awsmachinetemplate_webhook.go index 65e4e0ca32..7180939d53 100644 --- a/api/v1beta2/awsmachinetemplate_webhook.go +++ b/api/v1beta2/awsmachinetemplate_webhook.go @@ -172,6 +172,23 @@ func (r *AWSMachineTemplate) validateIgnitionAndCloudInit() field.ErrorList { return allErrs } + +func (r *AWSMachineTemplate) validateHostAllocation() field.ErrorList { + var allErrs field.ErrorList + + spec := r.Spec.Template.Spec + + // Check if both hostID and dynamicHostAllocation are specified + hasHostID := spec.HostID != nil && len(*spec.HostID) > 0 + hasDynamicHostAllocation := spec.DynamicHostAllocation != nil + + if hasHostID && hasDynamicHostAllocation { + allErrs = append(allErrs, field.Forbidden(field.NewPath("spec.template.spec.hostID"), "hostID and dynamicHostAllocation are mutually exclusive"), field.Forbidden(field.NewPath("spec.template.spec.dynamicHostAllocation"), "hostID and dynamicHostAllocation are mutually exclusive")) + } + + return allErrs +} + func (r *AWSMachineTemplate) validateSSHKeyName() field.ErrorList { return validateSSHKeyName(r.Spec.Template.Spec.SSHKeyName) } @@ -205,6 +222,7 @@ func (r *AWSMachineTemplateWebhook) ValidateCreate(_ context.Context, raw runtim allErrs = append(allErrs, obj.validateSSHKeyName()...) allErrs = append(allErrs, obj.validateAdditionalSecurityGroups()...) allErrs = append(allErrs, obj.Spec.Template.Spec.AdditionalTags.Validate()...) + allErrs = append(allErrs, obj.validateHostAllocation()...) 
return nil, aggregateObjErrors(obj.GroupVersionKind().GroupKind(), obj.Name, allErrs) } @@ -227,7 +245,7 @@ func (r *AWSMachineTemplateWebhook) ValidateUpdate(ctx context.Context, oldRaw r var allErrs field.ErrorList - if !topology.ShouldSkipImmutabilityChecks(req, newAWSMachineTemplate) && !cmp.Equal(newAWSMachineTemplate.Spec, oldAWSMachineTemplate.Spec) { + if !topology.IsDryRunRequest(req, newAWSMachineTemplate) && !cmp.Equal(newAWSMachineTemplate.Spec, oldAWSMachineTemplate.Spec) { if oldAWSMachineTemplate.Spec.Template.Spec.InstanceMetadataOptions == nil { oldAWSMachineTemplate.Spec.Template.Spec.InstanceMetadataOptions = newAWSMachineTemplate.Spec.Template.Spec.InstanceMetadataOptions } diff --git a/api/v1beta2/awsmachinetemplate_webhook_test.go b/api/v1beta2/awsmachinetemplate_webhook_test.go index ce355d1e4b..1aefb0d260 100644 --- a/api/v1beta2/awsmachinetemplate_webhook_test.go +++ b/api/v1beta2/awsmachinetemplate_webhook_test.go @@ -80,6 +80,26 @@ func TestAWSMachineTemplateValidateCreate(t *testing.T) { }, wantError: false, }, + { + name: "hostID and dynamicHostAllocation are mutually exclusive", + inputTemplate: &AWSMachineTemplate{ + ObjectMeta: metav1.ObjectMeta{}, + Spec: AWSMachineTemplateSpec{ + Template: AWSMachineTemplateResource{ + Spec: AWSMachineSpec{ + InstanceType: "test", + HostID: aws.String("h-1234567890abcdef0"), + DynamicHostAllocation: &DynamicHostAllocationSpec{ + Tags: map[string]string{ + "Environment": "test", + }, + }, + }, + }, + }, + }, + wantError: true, + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { diff --git a/api/v1beta2/awsmanagedcluster_types.go b/api/v1beta2/awsmanagedcluster_types.go index 67d9b2fc92..ce04e18669 100644 --- a/api/v1beta2/awsmanagedcluster_types.go +++ b/api/v1beta2/awsmanagedcluster_types.go @@ -19,14 +19,14 @@ package v1beta2 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) // AWSManagedClusterSpec defines the desired state of AWSManagedCluster type AWSManagedClusterSpec struct { // ControlPlaneEndpoint represents the endpoint used to communicate with the control plane. // +optional - ControlPlaneEndpoint clusterv1.APIEndpoint `json:"controlPlaneEndpoint"` + ControlPlaneEndpoint clusterv1beta1.APIEndpoint `json:"controlPlaneEndpoint"` } // AWSManagedClusterStatus defines the observed state of AWSManagedCluster @@ -37,11 +37,11 @@ type AWSManagedClusterStatus struct { // FailureDomains specifies a list fo available availability zones that can be used // +optional - FailureDomains clusterv1.FailureDomains `json:"failureDomains,omitempty"` + FailureDomains clusterv1beta1.FailureDomains `json:"failureDomains,omitempty"` // Conditions defines current service state of the AWSManagedCluster. // +optional - Conditions clusterv1.Conditions `json:"conditions,omitempty"` + Conditions clusterv1beta1.Conditions `json:"conditions,omitempty"` } // +kubebuilder:object:root=true @@ -76,12 +76,12 @@ func init() { // GetConditions returns the observations of the operational state of the // AWSManagedCluster resource. -func (r *AWSManagedCluster) GetConditions() clusterv1.Conditions { +func (r *AWSManagedCluster) GetConditions() clusterv1beta1.Conditions { return r.Status.Conditions } // SetConditions sets the underlying service state of the AWSManagedCluster to -// the predescribed clusterv1.Conditions. 
-func (r *AWSManagedCluster) SetConditions(conditions clusterv1.Conditions) { +// the predescribed clusterv1beta1.Conditions. +func (r *AWSManagedCluster) SetConditions(conditions clusterv1beta1.Conditions) { r.Status.Conditions = conditions } diff --git a/api/v1beta2/conditions_consts.go b/api/v1beta2/conditions_consts.go index 604ef8e1d5..1175ac9916 100644 --- a/api/v1beta2/conditions_consts.go +++ b/api/v1beta2/conditions_consts.go @@ -16,19 +16,19 @@ limitations under the License. package v1beta2 -import clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" +import clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" const ( // PrincipalCredentialRetrievedCondition reports on whether Principal credentials could be retrieved successfully. // A possible scenario, where retrieval is unsuccessful, is when SourcePrincipal is not authorized for assume role. - PrincipalCredentialRetrievedCondition clusterv1.ConditionType = "PrincipalCredentialRetrieved" + PrincipalCredentialRetrievedCondition clusterv1beta1.ConditionType = "PrincipalCredentialRetrieved" // PrincipalCredentialRetrievalFailedReason used when errors occur during identity credential retrieval. PrincipalCredentialRetrievalFailedReason = "PrincipalCredentialRetrievalFailed" // CredentialProviderBuildFailedReason used when errors occur during building providers before trying credential retrieval. //nolint:gosec CredentialProviderBuildFailedReason = "CredentialProviderBuildFailed" // PrincipalUsageAllowedCondition reports on whether Principal and all the nested source identities are allowed to be used in the AWSCluster namespace. - PrincipalUsageAllowedCondition clusterv1.ConditionType = "PrincipalUsageAllowed" + PrincipalUsageAllowedCondition clusterv1beta1.ConditionType = "PrincipalUsageAllowed" // PrincipalUsageUnauthorizedReason used when AWSCluster namespace is not in the identity's allowed namespaces list. PrincipalUsageUnauthorizedReason = "PrincipalUsageUnauthorized" // SourcePrincipalUsageUnauthorizedReason used when AWSCluster is not in the intersection of source identity allowed namespaces @@ -38,7 +38,7 @@ const ( const ( // VpcReadyCondition reports on the successful reconciliation of a VPC. - VpcReadyCondition clusterv1.ConditionType = "VpcReady" + VpcReadyCondition clusterv1beta1.ConditionType = "VpcReady" // VpcCreationStartedReason used when attempting to create a VPC for a managed cluster. // Will not be applied to unmanaged clusters. VpcCreationStartedReason = "VpcCreationStarted" @@ -48,7 +48,7 @@ const ( const ( // SubnetsReadyCondition reports on the successful reconciliation of subnets. - SubnetsReadyCondition clusterv1.ConditionType = "SubnetsReady" + SubnetsReadyCondition clusterv1beta1.ConditionType = "SubnetsReady" // SubnetsReconciliationFailedReason used to report failures while reconciling subnets. SubnetsReconciliationFailedReason = "SubnetsReconciliationFailed" ) @@ -56,7 +56,7 @@ const ( const ( // InternetGatewayReadyCondition reports on the successful reconciliation of internet gateways. // Only applicable to managed clusters. - InternetGatewayReadyCondition clusterv1.ConditionType = "InternetGatewayReady" + InternetGatewayReadyCondition clusterv1beta1.ConditionType = "InternetGatewayReady" // InternetGatewayFailedReason used when errors occur during internet gateway reconciliation. InternetGatewayFailedReason = "InternetGatewayFailed" ) @@ -64,7 +64,7 @@ const ( const ( // EgressOnlyInternetGatewayReadyCondition reports on the successful reconciliation of egress only internet gateways. 
// Only applicable to managed clusters. - EgressOnlyInternetGatewayReadyCondition clusterv1.ConditionType = "EgressOnlyInternetGatewayReady" + EgressOnlyInternetGatewayReadyCondition clusterv1beta1.ConditionType = "EgressOnlyInternetGatewayReady" // EgressOnlyInternetGatewayFailedReason used when errors occur during egress only internet gateway reconciliation. EgressOnlyInternetGatewayFailedReason = "EgressOnlyInternetGatewayFailed" ) @@ -72,7 +72,7 @@ const ( const ( // CarrierGatewayReadyCondition reports on the successful reconciliation of carrier gateways. // Only applicable to managed clusters. - CarrierGatewayReadyCondition clusterv1.ConditionType = "CarrierGatewayReady" + CarrierGatewayReadyCondition clusterv1beta1.ConditionType = "CarrierGatewayReady" // CarrierGatewayFailedReason used when errors occur during carrier gateway reconciliation. CarrierGatewayFailedReason = "CarrierGatewayFailed" ) @@ -80,7 +80,7 @@ const ( const ( // NatGatewaysReadyCondition reports successful reconciliation of NAT gateways. // Only applicable to managed clusters. - NatGatewaysReadyCondition clusterv1.ConditionType = "NatGatewaysReady" + NatGatewaysReadyCondition clusterv1beta1.ConditionType = "NatGatewaysReady" // NatGatewaysCreationStartedReason set once when creating new NAT gateways. NatGatewaysCreationStartedReason = "NatGatewaysCreationStarted" // NatGatewaysReconciliationFailedReason used when any errors occur during reconciliation of NAT gateways. @@ -90,7 +90,7 @@ const ( const ( // RouteTablesReadyCondition reports successful reconciliation of route tables. // Only applicable to managed clusters. - RouteTablesReadyCondition clusterv1.ConditionType = "RouteTablesReady" + RouteTablesReadyCondition clusterv1beta1.ConditionType = "RouteTablesReady" // RouteTableReconciliationFailedReason used when any errors occur during reconciliation of route tables. RouteTableReconciliationFailedReason = "RouteTableReconciliationFailed" ) @@ -98,7 +98,7 @@ const ( const ( // VpcEndpointsReadyCondition reports successful reconciliation of vpc endpoints. // Only applicable to managed clusters. - VpcEndpointsReadyCondition clusterv1.ConditionType = "VpcEndpointsReadyCondition" + VpcEndpointsReadyCondition clusterv1beta1.ConditionType = "VpcEndpointsReadyCondition" // VpcEndpointsReconciliationFailedReason used when any errors occur during reconciliation of vpc endpoints. VpcEndpointsReconciliationFailedReason = "VpcEndpointsReconciliationFailed" ) @@ -106,14 +106,14 @@ const ( const ( // SecondaryCidrsReadyCondition reports successful reconciliation of secondary CIDR blocks. // Only applicable to managed clusters. - SecondaryCidrsReadyCondition clusterv1.ConditionType = "SecondaryCidrsReady" + SecondaryCidrsReadyCondition clusterv1beta1.ConditionType = "SecondaryCidrsReady" // SecondaryCidrReconciliationFailedReason used when any errors occur during reconciliation of secondary CIDR blocks. SecondaryCidrReconciliationFailedReason = "SecondaryCidrReconciliationFailed" ) const ( // ClusterSecurityGroupsReadyCondition reports successful reconciliation of security groups. - ClusterSecurityGroupsReadyCondition clusterv1.ConditionType = "ClusterSecurityGroupsReady" + ClusterSecurityGroupsReadyCondition clusterv1beta1.ConditionType = "ClusterSecurityGroupsReady" // ClusterSecurityGroupReconciliationFailedReason used when any errors occur during reconciliation of security groups. 
ClusterSecurityGroupReconciliationFailedReason = "SecurityGroupReconciliationFailed" ) @@ -121,7 +121,7 @@ const ( const ( // BastionHostReadyCondition reports whether a bastion host is ready. Depending on the configuration, a cluster // may not require a bastion host and this condition will be skipped. - BastionHostReadyCondition clusterv1.ConditionType = "BastionHostReady" + BastionHostReadyCondition clusterv1beta1.ConditionType = "BastionHostReady" // BastionCreationStartedReason used when creating a new bastion host. BastionCreationStartedReason = "BastionCreationStarted" // BastionHostFailedReason used when an error occurs during the creation of a bastion host. @@ -130,7 +130,7 @@ const ( const ( // LoadBalancerReadyCondition reports on whether a control plane load balancer was successfully reconciled. - LoadBalancerReadyCondition clusterv1.ConditionType = "LoadBalancerReady" + LoadBalancerReadyCondition clusterv1beta1.ConditionType = "LoadBalancerReady" // WaitForDNSNameReason used while waiting for a DNS name for the API server to be populated. WaitForDNSNameReason = "WaitForDNSName" // WaitForExternalControlPlaneEndpointReason is available when the AWS Cluster is waiting for an externally managed @@ -144,7 +144,11 @@ const ( const ( // InstanceReadyCondition reports on current status of the EC2 instance. Ready indicates the instance is in a Running state. - InstanceReadyCondition clusterv1.ConditionType = "InstanceReady" + InstanceReadyCondition clusterv1beta1.ConditionType = "InstanceReady" + + // DedicatedHostReleaseCondition reports on the status of dedicated host release operations. + // This condition tracks whether the dedicated host has been successfully released or if there are failures. + DedicatedHostReleaseCondition clusterv1beta1.ConditionType = "DedicatedHostRelease" // InstanceNotFoundReason used when the instance couldn't be retrieved. InstanceNotFoundReason = "InstanceNotFound" @@ -166,7 +170,7 @@ const ( const ( // SecurityGroupsReadyCondition indicates the security groups are up to date on the AWSMachine. - SecurityGroupsReadyCondition clusterv1.ConditionType = "SecurityGroupsReady" + SecurityGroupsReadyCondition clusterv1beta1.ConditionType = "SecurityGroupsReady" // SecurityGroupsFailedReason used when the security groups could not be synced. SecurityGroupsFailedReason = "SecurityGroupsSyncFailed" @@ -177,7 +181,7 @@ const ( // When set to false, severity can be an Error if the subnet is not found or unavailable in the instance's AZ. // Note this is only applicable to control plane machines. // Only applicable to control plane machines. - ELBAttachedCondition clusterv1.ConditionType = "ELBAttached" + ELBAttachedCondition clusterv1beta1.ConditionType = "ELBAttached" // ELBAttachFailedReason used when a control plane node fails to attach to the ELB. ELBAttachFailedReason = "ELBAttachFailed" @@ -187,8 +191,11 @@ const ( const ( // S3BucketReadyCondition indicates an S3 bucket has been created successfully. - S3BucketReadyCondition clusterv1.ConditionType = "S3BucketCreated" + S3BucketReadyCondition clusterv1beta1.ConditionType = "S3BucketCreated" // S3BucketFailedReason is used when any errors occur during reconciliation of an S3 bucket. S3BucketFailedReason = "S3BucketCreationFailed" + + // DedicatedHostReleaseFailedReason used when the dedicated host release fails. 
+ DedicatedHostReleaseFailedReason = "DedicatedHostReleaseFailed" ) diff --git a/api/v1beta2/tags.go b/api/v1beta2/tags.go index 45bc371a49..764d06f73d 100644 --- a/api/v1beta2/tags.go +++ b/api/v1beta2/tags.go @@ -23,7 +23,7 @@ import ( "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/validation/field" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" ) // Tags defines a map of tags. @@ -78,7 +78,7 @@ func (t Tags) Validate() []*field.Error { // Defines the maximum number of user tags which can be created for a specific resource const maxUserTagsAllowed = 50 var errs field.ErrorList - var userTagCount = len(t) + userTagCount := len(t) re := regexp.MustCompile(`^[a-zA-Z0-9\s\_\.\:\=\+\-\@\/]*$`) for k, v := range t { diff --git a/api/v1beta2/types.go b/api/v1beta2/types.go index c268165c10..81a3be6db3 100644 --- a/api/v1beta2/types.go +++ b/api/v1beta2/types.go @@ -21,7 +21,7 @@ import ( "k8s.io/apimachinery/pkg/util/sets" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) const ( @@ -192,7 +192,7 @@ type Instance struct { IAMProfile string `json:"iamProfile,omitempty"` // Addresses contains the AWS instance associated addresses. - Addresses []clusterv1.MachineAddress `json:"addresses,omitempty"` + Addresses []clusterv1beta1.MachineAddress `json:"addresses,omitempty"` // The private IPv4 address assigned to the instance. PrivateIP *string `json:"privateIp,omitempty"` @@ -286,6 +286,11 @@ type Instance struct { // +optional HostID *string `json:"hostID,omitempty"` + // DynamicHostAllocation enables automatic allocation of dedicated hosts. + // This field is mutually exclusive with HostID. + // +optional + DynamicHostAllocation *DynamicHostAllocationSpec `json:"dynamicHostAllocation,omitempty"` + // CapacityReservationPreference specifies the preference for use of Capacity Reservations by the instance. Valid values include: // "Open": The instance may make use of open Capacity Reservations that match its AZ and InstanceType // "None": The instance may not make use of any Capacity Reservations. This is to conserve open reservations for desired workloads @@ -316,6 +321,33 @@ const ( CapacityReservationPreferenceOpen CapacityReservationPreference = "Open" ) +// DedicatedHostInfo contains information about a dedicated host. +type DedicatedHostInfo struct { + // HostID is the ID of the dedicated host. + HostID string `json:"hostID"` + + // InstanceFamily is the instance family supported by the host. + InstanceFamily string `json:"instanceFamily"` + + // InstanceType is the instance type supported by the host. + InstanceType string `json:"instanceType"` + + // AvailabilityZone is the AZ where the host is located. + AvailabilityZone string `json:"availabilityZone"` + + // State is the current state of the dedicated host. + State string `json:"state"` + + // TotalCapacity is the total number of instances that can be launched on the host. + TotalCapacity int32 `json:"totalCapacity"` + + // AvailableCapacity is the number of instances that can still be launched on the host. + AvailableCapacity int32 `json:"availableCapacity"` + + // Tags associated with the dedicated host. 
+ Tags map[string]string `json:"tags,omitempty"` +} + // MarketType describes the market type of an Instance // +kubebuilder:validation:Enum:=OnDemand;Spot;CapacityBlock type MarketType string diff --git a/api/v1beta2/zz_generated.deepcopy.go b/api/v1beta2/zz_generated.deepcopy.go index 197cffba66..1293074cb5 100644 --- a/api/v1beta2/zz_generated.deepcopy.go +++ b/api/v1beta2/zz_generated.deepcopy.go @@ -24,7 +24,7 @@ import ( corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" - "sigs.k8s.io/cluster-api/api/v1beta1" + "sigs.k8s.io/cluster-api/api/core/v1beta1" ) // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. @@ -782,6 +782,11 @@ func (in *AWSMachineSpec) DeepCopyInto(out *AWSMachineSpec) { *out = new(string) **out = **in } + if in.DynamicHostAllocation != nil { + in, out := &in.DynamicHostAllocation, &out.DynamicHostAllocation + *out = new(DynamicHostAllocationSpec) + (*in).DeepCopyInto(*out) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSMachineSpec. @@ -824,6 +829,11 @@ func (in *AWSMachineStatus) DeepCopyInto(out *AWSMachineStatus) { (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.DedicatedHost != nil { + in, out := &in.DedicatedHost, &out.DedicatedHost + *out = new(DedicatedHostStatus) + (*in).DeepCopyInto(*out) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSMachineStatus. @@ -938,6 +948,18 @@ func (in *AWSMachineTemplateStatus) DeepCopyInto(out *AWSMachineTemplateStatus) (*out)[key] = val.DeepCopy() } } + if in.NodeInfo != nil { + in, out := &in.NodeInfo, &out.NodeInfo + *out = new(NodeInfo) + **out = **in + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make(v1beta1.Conditions, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSMachineTemplateStatus. @@ -1413,6 +1435,70 @@ func (in *CloudInit) DeepCopy() *CloudInit { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DedicatedHostInfo) DeepCopyInto(out *DedicatedHostInfo) { + *out = *in + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DedicatedHostInfo. +func (in *DedicatedHostInfo) DeepCopy() *DedicatedHostInfo { + if in == nil { + return nil + } + out := new(DedicatedHostInfo) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DedicatedHostStatus) DeepCopyInto(out *DedicatedHostStatus) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DedicatedHostStatus. +func (in *DedicatedHostStatus) DeepCopy() *DedicatedHostStatus { + if in == nil { + return nil + } + out := new(DedicatedHostStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DynamicHostAllocationSpec) DeepCopyInto(out *DynamicHostAllocationSpec) { + *out = *in + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DynamicHostAllocationSpec. +func (in *DynamicHostAllocationSpec) DeepCopy() *DynamicHostAllocationSpec { + if in == nil { + return nil + } + out := new(DynamicHostAllocationSpec) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ElasticIPPool) DeepCopyInto(out *ElasticIPPool) { *out = *in @@ -1736,6 +1822,11 @@ func (in *Instance) DeepCopyInto(out *Instance) { *out = new(string) **out = **in } + if in.DynamicHostAllocation != nil { + in, out := &in.DynamicHostAllocation, &out.DynamicHostAllocation + *out = new(DynamicHostAllocationSpec) + (*in).DeepCopyInto(*out) + } out.CPUOptions = in.CPUOptions } @@ -1934,6 +2025,21 @@ func (in *NetworkStatus) DeepCopy() *NetworkStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeInfo) DeepCopyInto(out *NodeInfo) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeInfo. +func (in *NodeInfo) DeepCopy() *NodeInfo { + if in == nil { + return nil + } + out := new(NodeInfo) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *PrivateDNSName) DeepCopyInto(out *PrivateDNSName) { *out = *in diff --git a/bootstrap/eks/api/v1beta1/condition_consts.go b/bootstrap/eks/api/v1beta1/condition_consts.go index 86ef328727..6839d15999 100644 --- a/bootstrap/eks/api/v1beta1/condition_consts.go +++ b/bootstrap/eks/api/v1beta1/condition_consts.go @@ -16,7 +16,7 @@ limitations under the License. package v1beta1 -import clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" +import clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" // Conditions and condition Reasons for the EKSConfig object // FROM: https://github.com/kubernetes-sigs/cluster-api/blob/main/bootstrap/kubeadm/api/v1beta1/condition_consts.go @@ -27,7 +27,7 @@ const ( // NOTE: When the DataSecret generation starts the process completes immediately and within the // same reconciliation, so the user will always see a transition from Wait to Generated without having // evidence that BootstrapSecret generation is started/in progress. 
- DataSecretAvailableCondition clusterv1.ConditionType = "DataSecretAvailable" + DataSecretAvailableCondition clusterv1beta1.ConditionType = "DataSecretAvailable" // DataSecretGenerationFailedReason (Severity=Warning) documents a EKSConfig controller detecting // an error while generating a data secret; those kind of errors are usually due to misconfigurations diff --git a/bootstrap/eks/api/v1beta1/eksconfig_types.go b/bootstrap/eks/api/v1beta1/eksconfig_types.go index d268722878..8380ce1d7a 100644 --- a/bootstrap/eks/api/v1beta1/eksconfig_types.go +++ b/bootstrap/eks/api/v1beta1/eksconfig_types.go @@ -19,7 +19,7 @@ package v1beta1 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) // EKSConfigSpec defines the desired state of Amazon EKS Bootstrap Configuration. @@ -84,7 +84,7 @@ type EKSConfigStatus struct { // Conditions defines current service state of the EKSConfig. // +optional - Conditions clusterv1.Conditions `json:"conditions,omitempty"` + Conditions clusterv1beta1.Conditions `json:"conditions,omitempty"` } // +kubebuilder:object:root=true @@ -104,12 +104,12 @@ type EKSConfig struct { } // GetConditions returns the observations of the operational state of the EKSConfig resource. -func (r *EKSConfig) GetConditions() clusterv1.Conditions { +func (r *EKSConfig) GetConditions() clusterv1beta1.Conditions { return r.Status.Conditions } -// SetConditions sets the underlying service state of the EKSConfig to the predescribed clusterv1.Conditions. -func (r *EKSConfig) SetConditions(conditions clusterv1.Conditions) { +// SetConditions sets the underlying service state of the EKSConfig to the predescribed clusterv1beta1.Conditions. +func (r *EKSConfig) SetConditions(conditions clusterv1beta1.Conditions) { r.Status.Conditions = conditions } diff --git a/bootstrap/eks/api/v1beta1/zz_generated.conversion.go b/bootstrap/eks/api/v1beta1/zz_generated.conversion.go index eba4f6f7ce..5c3034ff4a 100644 --- a/bootstrap/eks/api/v1beta1/zz_generated.conversion.go +++ b/bootstrap/eks/api/v1beta1/zz_generated.conversion.go @@ -27,7 +27,7 @@ import ( conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" v1beta2 "sigs.k8s.io/cluster-api-provider-aws/v2/bootstrap/eks/api/v1beta2" - apiv1beta1 "sigs.k8s.io/cluster-api/api/v1beta1" + corev1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) func init() { @@ -62,11 +62,6 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta2.EKSConfigSpec)(nil), (*EKSConfigSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_EKSConfigSpec_To_v1beta1_EKSConfigSpec(a.(*v1beta2.EKSConfigSpec), b.(*EKSConfigSpec), scope) - }); err != nil { - return err - } if err := s.AddGeneratedConversionFunc((*EKSConfigStatus)(nil), (*v1beta2.EKSConfigStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1beta1_EKSConfigStatus_To_v1beta2_EKSConfigStatus(a.(*EKSConfigStatus), b.(*v1beta2.EKSConfigStatus), scope) }); err != nil { @@ -127,11 +122,15 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } + if err := s.AddConversionFunc((*v1beta2.EKSConfigSpec)(nil), (*EKSConfigSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_EKSConfigSpec_To_v1beta1_EKSConfigSpec(a.(*v1beta2.EKSConfigSpec), b.(*EKSConfigSpec), scope) + }); err != 
nil { + return err + } return nil } func autoConvert_v1beta1_EKSConfig_To_v1beta2_EKSConfig(in *EKSConfig, out *v1beta2.EKSConfig, s conversion.Scope) error { - out.TypeMeta = in.TypeMeta out.ObjectMeta = in.ObjectMeta if err := Convert_v1beta1_EKSConfigSpec_To_v1beta2_EKSConfigSpec(&in.Spec, &out.Spec, s); err != nil { return err @@ -148,7 +147,6 @@ func Convert_v1beta1_EKSConfig_To_v1beta2_EKSConfig(in *EKSConfig, out *v1beta2. } func autoConvert_v1beta2_EKSConfig_To_v1beta1_EKSConfig(in *v1beta2.EKSConfig, out *EKSConfig, s conversion.Scope) error { - out.TypeMeta = in.TypeMeta out.ObjectMeta = in.ObjectMeta if err := Convert_v1beta2_EKSConfigSpec_To_v1beta1_EKSConfigSpec(&in.Spec, &out.Spec, s); err != nil { return err @@ -165,7 +163,6 @@ func Convert_v1beta2_EKSConfig_To_v1beta1_EKSConfig(in *v1beta2.EKSConfig, out * } func autoConvert_v1beta1_EKSConfigList_To_v1beta2_EKSConfigList(in *EKSConfigList, out *v1beta2.EKSConfigList, s conversion.Scope) error { - out.TypeMeta = in.TypeMeta out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items @@ -187,7 +184,6 @@ func Convert_v1beta1_EKSConfigList_To_v1beta2_EKSConfigList(in *EKSConfigList, o } func autoConvert_v1beta2_EKSConfigList_To_v1beta1_EKSConfigList(in *v1beta2.EKSConfigList, out *EKSConfigList, s conversion.Scope) error { - out.TypeMeta = in.TypeMeta out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items @@ -251,7 +247,7 @@ func autoConvert_v1beta1_EKSConfigStatus_To_v1beta2_EKSConfigStatus(in *EKSConfi out.FailureReason = in.FailureReason out.FailureMessage = in.FailureMessage out.ObservedGeneration = in.ObservedGeneration - out.Conditions = *(*apiv1beta1.Conditions)(unsafe.Pointer(&in.Conditions)) + out.Conditions = *(*corev1beta1.Conditions)(unsafe.Pointer(&in.Conditions)) return nil } @@ -266,7 +262,7 @@ func autoConvert_v1beta2_EKSConfigStatus_To_v1beta1_EKSConfigStatus(in *v1beta2. out.FailureReason = in.FailureReason out.FailureMessage = in.FailureMessage out.ObservedGeneration = in.ObservedGeneration - out.Conditions = *(*apiv1beta1.Conditions)(unsafe.Pointer(&in.Conditions)) + out.Conditions = *(*corev1beta1.Conditions)(unsafe.Pointer(&in.Conditions)) return nil } @@ -276,7 +272,6 @@ func Convert_v1beta2_EKSConfigStatus_To_v1beta1_EKSConfigStatus(in *v1beta2.EKSC } func autoConvert_v1beta1_EKSConfigTemplate_To_v1beta2_EKSConfigTemplate(in *EKSConfigTemplate, out *v1beta2.EKSConfigTemplate, s conversion.Scope) error { - out.TypeMeta = in.TypeMeta out.ObjectMeta = in.ObjectMeta if err := Convert_v1beta1_EKSConfigTemplateSpec_To_v1beta2_EKSConfigTemplateSpec(&in.Spec, &out.Spec, s); err != nil { return err @@ -290,7 +285,6 @@ func Convert_v1beta1_EKSConfigTemplate_To_v1beta2_EKSConfigTemplate(in *EKSConfi } func autoConvert_v1beta2_EKSConfigTemplate_To_v1beta1_EKSConfigTemplate(in *v1beta2.EKSConfigTemplate, out *EKSConfigTemplate, s conversion.Scope) error { - out.TypeMeta = in.TypeMeta out.ObjectMeta = in.ObjectMeta if err := Convert_v1beta2_EKSConfigTemplateSpec_To_v1beta1_EKSConfigTemplateSpec(&in.Spec, &out.Spec, s); err != nil { return err @@ -304,7 +298,6 @@ func Convert_v1beta2_EKSConfigTemplate_To_v1beta1_EKSConfigTemplate(in *v1beta2. 
} func autoConvert_v1beta1_EKSConfigTemplateList_To_v1beta2_EKSConfigTemplateList(in *EKSConfigTemplateList, out *v1beta2.EKSConfigTemplateList, s conversion.Scope) error { - out.TypeMeta = in.TypeMeta out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items @@ -326,7 +319,6 @@ func Convert_v1beta1_EKSConfigTemplateList_To_v1beta2_EKSConfigTemplateList(in * } func autoConvert_v1beta2_EKSConfigTemplateList_To_v1beta1_EKSConfigTemplateList(in *v1beta2.EKSConfigTemplateList, out *EKSConfigTemplateList, s conversion.Scope) error { - out.TypeMeta = in.TypeMeta out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items diff --git a/bootstrap/eks/api/v1beta1/zz_generated.deepcopy.go b/bootstrap/eks/api/v1beta1/zz_generated.deepcopy.go index 131707fac7..b09343ed28 100644 --- a/bootstrap/eks/api/v1beta1/zz_generated.deepcopy.go +++ b/bootstrap/eks/api/v1beta1/zz_generated.deepcopy.go @@ -22,7 +22,7 @@ package v1beta1 import ( runtime "k8s.io/apimachinery/pkg/runtime" - apiv1beta1 "sigs.k8s.io/cluster-api/api/v1beta1" + corev1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. @@ -151,7 +151,7 @@ func (in *EKSConfigStatus) DeepCopyInto(out *EKSConfigStatus) { } if in.Conditions != nil { in, out := &in.Conditions, &out.Conditions - *out = make(apiv1beta1.Conditions, len(*in)) + *out = make(corev1beta1.Conditions, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } diff --git a/bootstrap/eks/api/v1beta2/condition_consts.go b/bootstrap/eks/api/v1beta2/condition_consts.go index e12213c840..6f5c49da7c 100644 --- a/bootstrap/eks/api/v1beta2/condition_consts.go +++ b/bootstrap/eks/api/v1beta2/condition_consts.go @@ -16,7 +16,9 @@ limitations under the License. package v1beta2 -import clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" +import ( + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" +) // Conditions and condition Reasons for the EKSConfig object // FROM: https://github.com/kubernetes-sigs/cluster-api/blob/main/bootstrap/kubeadm/api/v1beta1/condition_consts.go @@ -27,7 +29,7 @@ const ( // NOTE: When the DataSecret generation starts the process completes immediately and within the // same reconciliation, so the user will always see a transition from Wait to Generated without having // evidence that BootstrapSecret generation is started/in progress. - DataSecretAvailableCondition clusterv1.ConditionType = "DataSecretAvailable" + DataSecretAvailableCondition clusterv1beta1.ConditionType = "DataSecretAvailable" // DataSecretGenerationFailedReason (Severity=Warning) documents a EKSConfig controller detecting // an error while generating a data secret; those kind of errors are usually due to misconfigurations diff --git a/bootstrap/eks/api/v1beta2/eksconfig_types.go b/bootstrap/eks/api/v1beta2/eksconfig_types.go index a2fce8e2cb..1f7905fc1e 100644 --- a/bootstrap/eks/api/v1beta2/eksconfig_types.go +++ b/bootstrap/eks/api/v1beta2/eksconfig_types.go @@ -19,7 +19,7 @@ package v1beta2 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) // EKSConfigSpec defines the desired state of Amazon EKS Bootstrap Configuration. @@ -107,7 +107,7 @@ type EKSConfigStatus struct { // Conditions defines current service state of the EKSConfig. 
// +optional - Conditions clusterv1.Conditions `json:"conditions,omitempty"` + Conditions clusterv1beta1.Conditions `json:"conditions,omitempty"` } // Encoding specifies the cloud-init file encoding. @@ -324,12 +324,12 @@ type EKSConfig struct { } // GetConditions returns the observations of the operational state of the EKSConfig resource. -func (r *EKSConfig) GetConditions() clusterv1.Conditions { +func (r *EKSConfig) GetConditions() clusterv1beta1.Conditions { return r.Status.Conditions } -// SetConditions sets the underlying service state of the EKSConfig to the predescribed clusterv1.Conditions. -func (r *EKSConfig) SetConditions(conditions clusterv1.Conditions) { +// SetConditions sets the underlying service state of the EKSConfig to the predescribed clusterv1beta1.Conditions. +func (r *EKSConfig) SetConditions(conditions clusterv1beta1.Conditions) { r.Status.Conditions = conditions } diff --git a/bootstrap/eks/api/v1beta2/zz_generated.deepcopy.go b/bootstrap/eks/api/v1beta2/zz_generated.deepcopy.go index 7b059799a7..9bc33bef6f 100644 --- a/bootstrap/eks/api/v1beta2/zz_generated.deepcopy.go +++ b/bootstrap/eks/api/v1beta2/zz_generated.deepcopy.go @@ -22,7 +22,7 @@ package v1beta2 import ( "k8s.io/apimachinery/pkg/runtime" - "sigs.k8s.io/cluster-api/api/v1beta1" + "sigs.k8s.io/cluster-api/api/core/v1beta1" ) // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. diff --git a/bootstrap/eks/controllers/eksconfig_controller.go b/bootstrap/eks/controllers/eksconfig_controller.go index ca55199a6b..f59bbc1262 100644 --- a/bootstrap/eks/controllers/eksconfig_controller.go +++ b/bootstrap/eks/controllers/eksconfig_controller.go @@ -41,16 +41,18 @@ import ( ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger" "sigs.k8s.io/cluster-api-provider-aws/v2/util/paused" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" bsutil "sigs.k8s.io/cluster-api/bootstrap/util" - expclusterv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" "sigs.k8s.io/cluster-api/feature" "sigs.k8s.io/cluster-api/util" - "sigs.k8s.io/cluster-api/util/conditions" - "sigs.k8s.io/cluster-api/util/patch" + v1beta1conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" + v1beta1patch "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/patch" "sigs.k8s.io/cluster-api/util/predicates" ) +const eksConfigKind = "EKSConfig" + // EKSConfigReconciler reconciles a EKSConfig object. 
type EKSConfigReconciler struct { client.Client @@ -77,7 +79,7 @@ func (r *EKSConfigReconciler) Reconcile(ctx context.Context, req ctrl.Request) ( log.Error(err, "Failed to get config") return ctrl.Result{}, err } - log = log.WithValues("EKSConfig", config.GetName()) + log = log.WithValues(eksConfigKind, config.GetName()) // check owner references and look up owning Machine object configOwner, err := bsutil.GetTypedConfigOwner(ctx, r.Client, config) @@ -117,23 +119,23 @@ func (r *EKSConfigReconciler) Reconcile(ctx context.Context, req ctrl.Request) ( return ctrl.Result{}, err } - patchHelper, err := patch.NewHelper(config, r.Client) + patchHelper, err := v1beta1patch.NewHelper(config, r.Client) if err != nil { return ctrl.Result{}, err } // set up defer block for updating config defer func() { - conditions.SetSummary(config, - conditions.WithConditions( + v1beta1conditions.SetSummary(config, + v1beta1conditions.WithConditions( eksbootstrapv1.DataSecretAvailableCondition, ), - conditions.WithStepCounter(), + v1beta1conditions.WithStepCounter(), ) - patchOpts := []patch.Option{} + patchOpts := []v1beta1patch.Option{} if rerr == nil { - patchOpts = append(patchOpts, patch.WithStatusObservedGeneration{}) + patchOpts = append(patchOpts, v1beta1patch.WithStatusObservedGeneration{}) } if err := patchHelper.Patch(ctx, config, patchOpts...); err != nil { log.Error(rerr, "Failed to patch config") @@ -202,27 +204,27 @@ func (r *EKSConfigReconciler) joinWorker(ctx context.Context, cluster *clusterv1 } } - if cluster.Spec.ControlPlaneRef == nil || cluster.Spec.ControlPlaneRef.Kind != "AWSManagedControlPlane" { + if !cluster.Spec.ControlPlaneRef.IsDefined() || cluster.Spec.ControlPlaneRef.Kind != "AWSManagedControlPlane" { return errors.New("Cluster's controlPlaneRef needs to be an AWSManagedControlPlane in order to use the EKS bootstrap provider") } - if !cluster.Status.InfrastructureReady { + if !ptr.Deref(cluster.Status.Initialization.InfrastructureProvisioned, false) { log.Info("Cluster infrastructure is not ready") - conditions.MarkFalse(config, + v1beta1conditions.MarkFalse(config, eksbootstrapv1.DataSecretAvailableCondition, eksbootstrapv1.WaitingForClusterInfrastructureReason, - clusterv1.ConditionSeverityInfo, "") + clusterv1beta1.ConditionSeverityInfo, "") return nil } - if !conditions.IsTrue(cluster, clusterv1.ControlPlaneInitializedCondition) { + if !ptr.Deref(cluster.Status.Initialization.ControlPlaneInitialized, false) { log.Info("Control Plane has not yet been initialized") - conditions.MarkFalse(config, eksbootstrapv1.DataSecretAvailableCondition, eksbootstrapv1.WaitingForControlPlaneInitializationReason, clusterv1.ConditionSeverityInfo, "") + v1beta1conditions.MarkFalse(config, eksbootstrapv1.DataSecretAvailableCondition, eksbootstrapv1.WaitingForControlPlaneInitializationReason, clusterv1beta1.ConditionSeverityInfo, "") return nil } controlPlane := &ekscontrolplanev1.AWSManagedControlPlane{} - if err := r.Get(ctx, client.ObjectKey{Name: cluster.Spec.ControlPlaneRef.Name, Namespace: cluster.Spec.ControlPlaneRef.Namespace}, controlPlane); err != nil { + if err := r.Get(ctx, client.ObjectKey{Name: cluster.Spec.ControlPlaneRef.Name, Namespace: cluster.Namespace}, controlPlane); err != nil { return err } @@ -230,7 +232,7 @@ func (r *EKSConfigReconciler) joinWorker(ctx context.Context, cluster *clusterv1 files, err := r.resolveFiles(ctx, config) if err != nil { log.Info("Failed to resolve files for user data") - conditions.MarkFalse(config, eksbootstrapv1.DataSecretAvailableCondition, 
eksbootstrapv1.DataSecretGenerationFailedReason, clusterv1.ConditionSeverityWarning, "%s", err.Error()) + v1beta1conditions.MarkFalse(config, eksbootstrapv1.DataSecretAvailableCondition, eksbootstrapv1.DataSecretGenerationFailedReason, clusterv1beta1.ConditionSeverityWarning, "%s", err.Error()) return err } @@ -275,14 +277,14 @@ func (r *EKSConfigReconciler) joinWorker(ctx context.Context, cluster *clusterv1 userDataScript, err := userdata.NewNode(nodeInput) if err != nil { log.Error(err, "Failed to create a worker join configuration") - conditions.MarkFalse(config, eksbootstrapv1.DataSecretAvailableCondition, eksbootstrapv1.DataSecretGenerationFailedReason, clusterv1.ConditionSeverityWarning, "") + v1beta1conditions.MarkFalse(config, eksbootstrapv1.DataSecretAvailableCondition, eksbootstrapv1.DataSecretGenerationFailedReason, clusterv1beta1.ConditionSeverityWarning, "") return err } // store userdata as secret if err := r.storeBootstrapData(ctx, cluster, config, userDataScript); err != nil { log.Error(err, "Failed to store bootstrap data") - conditions.MarkFalse(config, eksbootstrapv1.DataSecretAvailableCondition, eksbootstrapv1.DataSecretGenerationFailedReason, clusterv1.ConditionSeverityWarning, "") + v1beta1conditions.MarkFalse(config, eksbootstrapv1.DataSecretAvailableCondition, eksbootstrapv1.DataSecretGenerationFailedReason, clusterv1beta1.ConditionSeverityWarning, "") return err } @@ -301,7 +303,7 @@ func (r *EKSConfigReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Man if feature.Gates.Enabled(feature.MachinePool) { b = b.Watches( - &expclusterv1.MachinePool{}, + &clusterv1.MachinePool{}, handler.EnqueueRequestsFromMapFunc(r.MachinePoolToBootstrapMapFunc), ) } @@ -314,7 +316,7 @@ func (r *EKSConfigReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Man err = c.Watch( source.Kind[client.Object](mgr.GetCache(), &clusterv1.Cluster{}, handler.EnqueueRequestsFromMapFunc((r.ClusterToEKSConfigs)), - predicates.ClusterPausedTransitionsOrInfrastructureReady(mgr.GetScheme(), logger.FromContext(ctx).GetLogger())), + predicates.ClusterPausedTransitionsOrInfrastructureProvisioned(mgr.GetScheme(), logger.FromContext(ctx).GetLogger())), ) if err != nil { return errors.Wrap(err, "failed adding watch for Clusters to controller manager") @@ -357,7 +359,7 @@ func (r *EKSConfigReconciler) storeBootstrapData(ctx context.Context, cluster *c config.Status.DataSecretName = ptr.To[string](secret.Name) config.Status.Ready = true - conditions.MarkTrue(config, eksbootstrapv1.DataSecretAvailableCondition) + v1beta1conditions.MarkTrue(config, eksbootstrapv1.DataSecretAvailableCondition) return nil } @@ -370,7 +372,7 @@ func (r *EKSConfigReconciler) MachineToBootstrapMapFunc(_ context.Context, o cli if !ok { klog.Errorf("Expected a Machine but got a %T", o) } - if m.Spec.Bootstrap.ConfigRef != nil && m.Spec.Bootstrap.ConfigRef.GroupVersionKind() == eksbootstrapv1.GroupVersion.WithKind("EKSConfig") { + if m.Spec.Bootstrap.ConfigRef.IsDefined() && m.Spec.Bootstrap.ConfigRef.APIGroup == eksbootstrapv1.GroupVersion.Group && m.Spec.Bootstrap.ConfigRef.Kind == eksConfigKind { name := client.ObjectKey{Namespace: m.Namespace, Name: m.Spec.Bootstrap.ConfigRef.Name} result = append(result, ctrl.Request{NamespacedName: name}) } @@ -382,12 +384,12 @@ func (r *EKSConfigReconciler) MachineToBootstrapMapFunc(_ context.Context, o cli func (r *EKSConfigReconciler) MachinePoolToBootstrapMapFunc(_ context.Context, o client.Object) []ctrl.Request { result := []ctrl.Request{} - m, ok := 
o.(*expclusterv1.MachinePool) + m, ok := o.(*clusterv1.MachinePool) if !ok { klog.Errorf("Expected a MachinePool but got a %T", o) } configRef := m.Spec.Template.Spec.Bootstrap.ConfigRef - if configRef != nil && configRef.GroupVersionKind().GroupKind() == eksbootstrapv1.GroupVersion.WithKind("EKSConfig").GroupKind() { + if configRef.IsDefined() && configRef.APIGroup == eksbootstrapv1.GroupVersion.Group && configRef.Kind == eksConfigKind { name := client.ObjectKey{Namespace: m.Namespace, Name: configRef.Name} result = append(result, ctrl.Request{NamespacedName: name}) } @@ -418,8 +420,9 @@ func (r *EKSConfigReconciler) ClusterToEKSConfigs(_ context.Context, o client.Ob } for _, m := range machineList.Items { - if m.Spec.Bootstrap.ConfigRef != nil && - m.Spec.Bootstrap.ConfigRef.GroupVersionKind().GroupKind() == eksbootstrapv1.GroupVersion.WithKind("EKSConfig").GroupKind() { + if m.Spec.Bootstrap.ConfigRef.IsDefined() && + m.Spec.Bootstrap.ConfigRef.APIGroup == eksbootstrapv1.GroupVersion.Group && + m.Spec.Bootstrap.ConfigRef.Kind == eksConfigKind { name := client.ObjectKey{Namespace: m.Namespace, Name: m.Spec.Bootstrap.ConfigRef.Name} result = append(result, ctrl.Request{NamespacedName: name}) } @@ -440,7 +443,7 @@ func (r *EKSConfigReconciler) createBootstrapSecret(ctx context.Context, cluster OwnerReferences: []metav1.OwnerReference{ { APIVersion: eksbootstrapv1.GroupVersion.String(), - Kind: "EKSConfig", + Kind: eksConfigKind, Name: config.Name, UID: config.UID, Controller: ptr.To[bool](true), diff --git a/bootstrap/eks/controllers/eksconfig_controller_reconciler_test.go b/bootstrap/eks/controllers/eksconfig_controller_reconciler_test.go index 163b94a338..1ae2122b2c 100644 --- a/bootstrap/eks/controllers/eksconfig_controller_reconciler_test.go +++ b/bootstrap/eks/controllers/eksconfig_controller_reconciler_test.go @@ -24,16 +24,15 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" + "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/yaml" eksbootstrapv1 "sigs.k8s.io/cluster-api-provider-aws/v2/bootstrap/eks/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/bootstrap/eks/internal/userdata" ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - "sigs.k8s.io/cluster-api/exp/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/cluster-api/util" - "sigs.k8s.io/cluster-api/util/conditions" ) func TestEKSConfigReconciler(t *testing.T) { @@ -85,7 +84,7 @@ func TestEKSConfigReconciler(t *testing.T) { config.ObjectMeta.OwnerReferences = []metav1.OwnerReference{ { Kind: "MachinePool", - APIVersion: v1beta1.GroupVersion.String(), + APIVersion: clusterv1.GroupVersion.String(), Name: mp.Name, UID: types.UID(fmt.Sprintf("%s uid", mp.Name)), }, @@ -284,17 +283,18 @@ func newCluster(name string) *clusterv1.Cluster { Name: name, }, Spec: clusterv1.ClusterSpec{ - ControlPlaneRef: &corev1.ObjectReference{ - Name: name, - Kind: "AWSManagedControlPlane", - Namespace: "default", + ControlPlaneRef: clusterv1.ContractVersionedObjectReference{ + Name: name, + Kind: "AWSManagedControlPlane", }, }, Status: clusterv1.ClusterStatus{ - InfrastructureReady: true, + Initialization: clusterv1.ClusterInitializationStatus{ + InfrastructureProvisioned: ptr.To(true), + }, }, } - conditions.MarkTrue(cluster, clusterv1.ControlPlaneInitializedCondition) + cluster.Status.Initialization.ControlPlaneInitialized = 
ptr.To(true) return cluster } @@ -317,9 +317,9 @@ func newMachine(cluster *clusterv1.Cluster, name string) *clusterv1.Machine { }, Spec: clusterv1.MachineSpec{ Bootstrap: clusterv1.Bootstrap{ - ConfigRef: &corev1.ObjectReference{ - Kind: "EKSConfig", - APIVersion: eksbootstrapv1.GroupVersion.String(), + ConfigRef: clusterv1.ContractVersionedObjectReference{ + Kind: "EKSConfig", + APIGroup: eksbootstrapv1.GroupVersion.Group, }, }, }, @@ -334,24 +334,24 @@ func newMachine(cluster *clusterv1.Cluster, name string) *clusterv1.Machine { } // newMachinePool returns a CAPI machine object; if cluster is not nil, the MachinePool is linked to the cluster as well. -func newMachinePool(cluster *clusterv1.Cluster, name string) *v1beta1.MachinePool { +func newMachinePool(cluster *clusterv1.Cluster, name string) *clusterv1.MachinePool { generatedName := fmt.Sprintf("%s-%s", name, util.RandomString(5)) - mp := &v1beta1.MachinePool{ + mp := &clusterv1.MachinePool{ TypeMeta: metav1.TypeMeta{ Kind: "MachinePool", - APIVersion: v1beta1.GroupVersion.String(), + APIVersion: clusterv1.GroupVersion.String(), }, ObjectMeta: metav1.ObjectMeta{ Namespace: "default", Name: generatedName, }, - Spec: v1beta1.MachinePoolSpec{ + Spec: clusterv1.MachinePoolSpec{ Template: clusterv1.MachineTemplateSpec{ Spec: clusterv1.MachineSpec{ Bootstrap: clusterv1.Bootstrap{ - ConfigRef: &corev1.ObjectReference{ - Kind: "EKSConfig", - APIVersion: eksbootstrapv1.GroupVersion.String(), + ConfigRef: clusterv1.ContractVersionedObjectReference{ + Kind: "EKSConfig", + APIGroup: eksbootstrapv1.GroupVersion.Group, }, }, }, @@ -397,7 +397,7 @@ func newEKSConfig(machine *clusterv1.Machine) *eksbootstrapv1.EKSConfig { } config.Status.DataSecretName = &machine.Name machine.Spec.Bootstrap.ConfigRef.Name = config.Name - machine.Spec.Bootstrap.ConfigRef.Namespace = config.Namespace + machine.Namespace = config.Namespace } return config } diff --git a/bootstrap/eks/controllers/eksconfig_controller_test.go b/bootstrap/eks/controllers/eksconfig_controller_test.go index bb82d14124..c647e7ea8a 100644 --- a/bootstrap/eks/controllers/eksconfig_controller_test.go +++ b/bootstrap/eks/controllers/eksconfig_controller_test.go @@ -22,8 +22,9 @@ import ( . 
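Aside: the test rewrites in the surrounding hunks migrate object references from corev1.ObjectReference to Cluster API's ContractVersionedObjectReference, and the readiness booleans into status.initialization. Expressed as manifests, the new shapes look roughly like this minimal sketch (names are hypothetical; only apiGroup, kind and name identify the referent, which is resolved in the owning object's namespace):

```yaml
apiVersion: cluster.x-k8s.io/v1beta2
kind: Machine
metadata:
  name: example-machine    # hypothetical
  namespace: default
spec:
  clusterName: example     # hypothetical
  bootstrap:
    configRef:             # ContractVersionedObjectReference: no apiVersion, no namespace
      apiGroup: bootstrap.cluster.x-k8s.io
      kind: EKSConfig
      name: example-machine-config
---
apiVersion: cluster.x-k8s.io/v1beta2
kind: Cluster
metadata:
  name: example            # hypothetical
  namespace: default
status:
  initialization:          # replaces the old top-level readiness booleans
    infrastructureProvisioned: true
    controlPlaneInitialized: true
```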
"github.com/onsi/gomega" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/utils/ptr" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" bsutil "sigs.k8s.io/cluster-api/bootstrap/util" ) @@ -35,7 +36,9 @@ func TestEKSConfigReconcilerReturnEarlyIfClusterInfraNotReady(t *testing.T) { config := newEKSConfig(machine) cluster.Status = clusterv1.ClusterStatus{ - InfrastructureReady: false, + Initialization: clusterv1.ClusterInitializationStatus{ + InfrastructureProvisioned: ptr.To(true), + }, } reconciler := EKSConfigReconciler{ @@ -56,7 +59,9 @@ func TestEKSConfigReconcilerReturnEarlyIfClusterControlPlaneNotInitialized(t *te config := newEKSConfig(machine) cluster.Status = clusterv1.ClusterStatus{ - InfrastructureReady: true, + Initialization: clusterv1.ClusterInitializationStatus{ + InfrastructureProvisioned: ptr.To(true), + }, } reconciler := EKSConfigReconciler{ diff --git a/bootstrap/eks/internal/userdata/node.go b/bootstrap/eks/internal/userdata/node.go index 468f15478f..f85f1f86f4 100644 --- a/bootstrap/eks/internal/userdata/node.go +++ b/bootstrap/eks/internal/userdata/node.go @@ -30,7 +30,9 @@ const ( defaultBootstrapCommand = "/etc/eks/bootstrap.sh" nodeUserData = `#cloud-config +{{- if .Files }} {{template "files" .Files}} +{{- end }} runcmd: {{- template "commands" .PreBootstrapCommands }} - {{ .BootstrapCommand }} {{.ClusterName}} {{- template "args" . }} diff --git a/bootstrap/eks/internal/userdata/node_test.go b/bootstrap/eks/internal/userdata/node_test.go index a5e314c115..307278a46b 100644 --- a/bootstrap/eks/internal/userdata/node_test.go +++ b/bootstrap/eks/internal/userdata/node_test.go @@ -49,7 +49,6 @@ func TestNewNode(t *testing.T) { }, }, expectedBytes: []byte(`#cloud-config -write_files: runcmd: - /etc/eks/bootstrap.sh test-cluster `), @@ -67,7 +66,6 @@ runcmd: }, }, expectedBytes: []byte(`#cloud-config -write_files: runcmd: - /etc/eks/bootstrap.sh test-cluster --kubelet-extra-args '--node-labels=node-role.undistro.io/infra=true --register-with-taints=dedicated=infra:NoSchedule' `), @@ -81,7 +79,6 @@ runcmd: }, }, expectedBytes: []byte(`#cloud-config -write_files: runcmd: - /etc/eks/bootstrap.sh test-cluster --container-runtime containerd `), @@ -99,7 +96,6 @@ runcmd: }, }, expectedBytes: []byte(`#cloud-config -write_files: runcmd: - /etc/eks/bootstrap.sh test-cluster --kubelet-extra-args '--node-labels=node-role.undistro.io/infra=true --register-with-taints=dedicated=infra:NoSchedule' --container-runtime containerd `), @@ -114,7 +110,6 @@ runcmd: }, }, expectedBytes: []byte(`#cloud-config -write_files: runcmd: - /etc/eks/bootstrap.sh test-cluster --ip-family ipv6 --service-ipv6-cidr fe80:0000:0000:0000:0204:61ff:fe9d:f156/24 `), @@ -128,7 +123,6 @@ runcmd: }, }, expectedBytes: []byte(`#cloud-config -write_files: runcmd: - /etc/eks/bootstrap.sh test-cluster --use-max-pods false `), @@ -142,7 +136,6 @@ runcmd: }, }, expectedBytes: []byte(`#cloud-config -write_files: runcmd: - /etc/eks/bootstrap.sh test-cluster --aws-api-retry-attempts 5 `), @@ -157,7 +150,6 @@ runcmd: }, }, expectedBytes: []byte(`#cloud-config -write_files: runcmd: - /etc/eks/bootstrap.sh test-cluster --pause-container-account 12345678 --pause-container-version v1 `), @@ -171,7 +163,6 @@ runcmd: }, }, expectedBytes: []byte(`#cloud-config -write_files: runcmd: - /etc/eks/bootstrap.sh test-cluster --dns-cluster-ip 192.168.0.1 `), @@ -185,7 +176,6 @@ runcmd: }, }, expectedBytes: []byte(`#cloud-config -write_files: runcmd: - 
/etc/eks/bootstrap.sh test-cluster --docker-config-json '{"debug":true}' `), @@ -199,7 +189,6 @@ runcmd: }, }, expectedBytes: []byte(`#cloud-config -write_files: runcmd: - "date" - "echo \"testing\"" @@ -215,7 +204,6 @@ runcmd: }, }, expectedBytes: []byte(`#cloud-config -write_files: runcmd: - /etc/eks/bootstrap.sh test-cluster - "date" @@ -232,7 +220,6 @@ runcmd: }, }, expectedBytes: []byte(`#cloud-config -write_files: runcmd: - "echo \"testing pre\"" - /etc/eks/bootstrap.sh test-cluster @@ -248,7 +235,6 @@ runcmd: }, }, expectedBytes: []byte(`#cloud-config -write_files: runcmd: - /custom/mybootstrap.sh test-cluster `), @@ -280,7 +266,6 @@ runcmd: }, }, expectedBytes: []byte(`#cloud-config -write_files: runcmd: - /etc/eks/bootstrap.sh test-cluster disk_setup: @@ -323,6 +308,31 @@ write_files: fs.inotify.max_user_instances=256 runcmd: - /etc/eks/bootstrap.sh test-cluster +`), + }, + { + name: "with empty files", + args: args{ + input: &NodeInput{ + ClusterName: "test-cluster", + Files: []eksbootstrapv1.File{}, + }, + }, + expectedBytes: []byte(`#cloud-config +runcmd: + - /etc/eks/bootstrap.sh test-cluster +`), + }, + { + name: "with nil files", + args: args{ + input: &NodeInput{ + ClusterName: "test-cluster", + }, + }, + expectedBytes: []byte(`#cloud-config +runcmd: + - /etc/eks/bootstrap.sh test-cluster `), }, { @@ -337,7 +347,6 @@ runcmd: }, }, expectedBytes: []byte(`#cloud-config -write_files: runcmd: - /etc/eks/bootstrap.sh test-cluster ntp: @@ -363,7 +372,6 @@ ntp: }, }, expectedBytes: []byte(`#cloud-config -write_files: runcmd: - /etc/eks/bootstrap.sh test-cluster users: diff --git a/cloudbuild-nightly.yaml b/cloudbuild-nightly.yaml index 8bd7f68720..35c50c403d 100644 --- a/cloudbuild-nightly.yaml +++ b/cloudbuild-nightly.yaml @@ -1,5 +1,5 @@ # See https://cloud.google.com/cloud-build/docs/build-config -timeout: 3000s +timeout: 7200s options: substitution_option: ALLOW_LOOSE steps: diff --git a/cloudbuild.yaml b/cloudbuild.yaml index 88aedb0471..3a90143685 100644 --- a/cloudbuild.yaml +++ b/cloudbuild.yaml @@ -1,5 +1,5 @@ # See https://cloud.google.com/cloud-build/docs/build-config -timeout: 3000s +timeout: 7200s options: substitution_option: ALLOW_LOOSE steps: diff --git a/cmd/clusterawsadm/api/bootstrap/v1alpha1/zz_generated.conversion.go b/cmd/clusterawsadm/api/bootstrap/v1alpha1/zz_generated.conversion.go index fb6a2b2045..9de9a6883b 100644 --- a/cmd/clusterawsadm/api/bootstrap/v1alpha1/zz_generated.conversion.go +++ b/cmd/clusterawsadm/api/bootstrap/v1alpha1/zz_generated.conversion.go @@ -53,21 +53,11 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta1.AWSIAMConfigurationSpec)(nil), (*AWSIAMConfigurationSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_AWSIAMConfigurationSpec_To_v1alpha1_AWSIAMConfigurationSpec(a.(*v1beta1.AWSIAMConfigurationSpec), b.(*AWSIAMConfigurationSpec), scope) - }); err != nil { - return err - } if err := s.AddGeneratedConversionFunc((*AWSIAMRoleSpec)(nil), (*v1beta1.AWSIAMRoleSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1alpha1_AWSIAMRoleSpec_To_v1beta1_AWSIAMRoleSpec(a.(*AWSIAMRoleSpec), b.(*v1beta1.AWSIAMRoleSpec), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta1.AWSIAMRoleSpec)(nil), (*AWSIAMRoleSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return 
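Aside: the `{{- if .Files }}` guard added to node.go above means the rendered user data only carries a write_files section when files are actually supplied, which is what the updated and newly added test expectations assert. Roughly, the two ends of the guard render as below; the second document assumes a hypothetical file entry:

```yaml
# NodeInput with empty or nil Files: no dangling write_files key is emitted.
#cloud-config
runcmd:
  - /etc/eks/bootstrap.sh test-cluster
---
# NodeInput with one file (hypothetical path and content).
#cloud-config
write_files:
  - path: /etc/sysctl.d/99-inotify.conf
    content: |
      fs.inotify.max_user_instances=256
runcmd:
  - /etc/eks/bootstrap.sh test-cluster
```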
Convert_v1beta1_AWSIAMRoleSpec_To_v1alpha1_AWSIAMRoleSpec(a.(*v1beta1.AWSIAMRoleSpec), b.(*AWSIAMRoleSpec), scope) - }); err != nil { - return err - } if err := s.AddGeneratedConversionFunc((*BootstrapUser)(nil), (*v1beta1.BootstrapUser)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1alpha1_BootstrapUser_To_v1beta1_BootstrapUser(a.(*BootstrapUser), b.(*v1beta1.BootstrapUser), scope) }); err != nil { @@ -128,11 +118,20 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } + if err := s.AddConversionFunc((*v1beta1.AWSIAMConfigurationSpec)(nil), (*AWSIAMConfigurationSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_AWSIAMConfigurationSpec_To_v1alpha1_AWSIAMConfigurationSpec(a.(*v1beta1.AWSIAMConfigurationSpec), b.(*AWSIAMConfigurationSpec), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*v1beta1.AWSIAMRoleSpec)(nil), (*AWSIAMRoleSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_AWSIAMRoleSpec_To_v1alpha1_AWSIAMRoleSpec(a.(*v1beta1.AWSIAMRoleSpec), b.(*AWSIAMRoleSpec), scope) + }); err != nil { + return err + } return nil } func autoConvert_v1alpha1_AWSIAMConfiguration_To_v1beta1_AWSIAMConfiguration(in *AWSIAMConfiguration, out *v1beta1.AWSIAMConfiguration, s conversion.Scope) error { - out.TypeMeta = in.TypeMeta if err := Convert_v1alpha1_AWSIAMConfigurationSpec_To_v1beta1_AWSIAMConfigurationSpec(&in.Spec, &out.Spec, s); err != nil { return err } @@ -145,7 +144,6 @@ func Convert_v1alpha1_AWSIAMConfiguration_To_v1beta1_AWSIAMConfiguration(in *AWS } func autoConvert_v1beta1_AWSIAMConfiguration_To_v1alpha1_AWSIAMConfiguration(in *v1beta1.AWSIAMConfiguration, out *AWSIAMConfiguration, s conversion.Scope) error { - out.TypeMeta = in.TypeMeta if err := Convert_v1beta1_AWSIAMConfigurationSpec_To_v1alpha1_AWSIAMConfigurationSpec(&in.Spec, &out.Spec, s); err != nil { return err } diff --git a/cmd/clusterawsadm/cloudformation/bootstrap/cluster_api_controller.go b/cmd/clusterawsadm/cloudformation/bootstrap/cluster_api_controller.go index 299386e492..846f2ebcdc 100644 --- a/cmd/clusterawsadm/cloudformation/bootstrap/cluster_api_controller.go +++ b/cmd/clusterawsadm/cloudformation/bootstrap/cluster_api_controller.go @@ -199,6 +199,7 @@ func (t Template) ControllersPolicy() *iamv1.PolicyDocument { "arn:*:autoscaling:*:*:autoScalingGroup:*:autoScalingGroupName/*", }, Action: iamv1.Actions{ + "autoscaling:CancelInstanceRefresh", "autoscaling:CreateAutoScalingGroup", "autoscaling:UpdateAutoScalingGroup", "autoscaling:CreateOrUpdateTags", diff --git a/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/customsuffix.yaml b/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/customsuffix.yaml index 8eef68545c..f237fee3d8 100644 --- a/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/customsuffix.yaml +++ b/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/customsuffix.yaml @@ -255,6 +255,7 @@ Resources: Resource: - '*' - Action: + - autoscaling:CancelInstanceRefresh - autoscaling:CreateAutoScalingGroup - autoscaling:UpdateAutoScalingGroup - autoscaling:CreateOrUpdateTags diff --git a/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/default.yaml b/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/default.yaml index 91c8c93d13..dcd7265768 100644 --- a/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/default.yaml +++ b/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/default.yaml @@ -255,6 +255,7 @@ Resources: Resource: - '*' - 
Action: + - autoscaling:CancelInstanceRefresh - autoscaling:CreateAutoScalingGroup - autoscaling:UpdateAutoScalingGroup - autoscaling:CreateOrUpdateTags diff --git a/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_all_secret_backends.yaml b/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_all_secret_backends.yaml index 7ca9cddb8a..2b86661dc3 100644 --- a/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_all_secret_backends.yaml +++ b/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_all_secret_backends.yaml @@ -261,6 +261,7 @@ Resources: Resource: - '*' - Action: + - autoscaling:CancelInstanceRefresh - autoscaling:CreateAutoScalingGroup - autoscaling:UpdateAutoScalingGroup - autoscaling:CreateOrUpdateTags diff --git a/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_allow_assume_role.yaml b/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_allow_assume_role.yaml index d057512504..51749bbbdb 100644 --- a/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_allow_assume_role.yaml +++ b/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_allow_assume_role.yaml @@ -255,6 +255,7 @@ Resources: Resource: - '*' - Action: + - autoscaling:CancelInstanceRefresh - autoscaling:CreateAutoScalingGroup - autoscaling:UpdateAutoScalingGroup - autoscaling:CreateOrUpdateTags diff --git a/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_bootstrap_user.yaml b/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_bootstrap_user.yaml index e5bcd2b062..04358c4c81 100644 --- a/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_bootstrap_user.yaml +++ b/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_bootstrap_user.yaml @@ -261,6 +261,7 @@ Resources: Resource: - '*' - Action: + - autoscaling:CancelInstanceRefresh - autoscaling:CreateAutoScalingGroup - autoscaling:UpdateAutoScalingGroup - autoscaling:CreateOrUpdateTags diff --git a/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_custom_bootstrap_user.yaml b/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_custom_bootstrap_user.yaml index 82e8fe4aec..d274fc8aad 100644 --- a/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_custom_bootstrap_user.yaml +++ b/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_custom_bootstrap_user.yaml @@ -261,6 +261,7 @@ Resources: Resource: - '*' - Action: + - autoscaling:CancelInstanceRefresh - autoscaling:CreateAutoScalingGroup - autoscaling:UpdateAutoScalingGroup - autoscaling:CreateOrUpdateTags diff --git a/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_different_instance_profiles.yaml b/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_different_instance_profiles.yaml index 0a893da464..09e30c5f18 100644 --- a/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_different_instance_profiles.yaml +++ b/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_different_instance_profiles.yaml @@ -255,6 +255,7 @@ Resources: Resource: - '*' - Action: + - autoscaling:CancelInstanceRefresh - autoscaling:CreateAutoScalingGroup - autoscaling:UpdateAutoScalingGroup - autoscaling:CreateOrUpdateTags diff --git a/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_eks_console.yaml b/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_eks_console.yaml index 32f8d798be..fc989c470c 100644 --- a/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_eks_console.yaml +++ b/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_eks_console.yaml @@ -255,6 +255,7 @@ Resources: Resource: - '*' - Action: + - 
autoscaling:CancelInstanceRefresh - autoscaling:CreateAutoScalingGroup - autoscaling:UpdateAutoScalingGroup - autoscaling:CreateOrUpdateTags diff --git a/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_eks_default_roles.yaml b/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_eks_default_roles.yaml index 54ea84c776..2f731ead80 100644 --- a/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_eks_default_roles.yaml +++ b/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_eks_default_roles.yaml @@ -255,6 +255,7 @@ Resources: Resource: - '*' - Action: + - autoscaling:CancelInstanceRefresh - autoscaling:CreateAutoScalingGroup - autoscaling:UpdateAutoScalingGroup - autoscaling:CreateOrUpdateTags diff --git a/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_eks_disable.yaml b/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_eks_disable.yaml index 77669b3dbe..5aed7870dd 100644 --- a/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_eks_disable.yaml +++ b/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_eks_disable.yaml @@ -255,6 +255,7 @@ Resources: Resource: - '*' - Action: + - autoscaling:CancelInstanceRefresh - autoscaling:CreateAutoScalingGroup - autoscaling:UpdateAutoScalingGroup - autoscaling:CreateOrUpdateTags diff --git a/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_eks_kms_prefix.yaml b/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_eks_kms_prefix.yaml index 80daf80ebe..31942853a2 100644 --- a/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_eks_kms_prefix.yaml +++ b/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_eks_kms_prefix.yaml @@ -255,6 +255,7 @@ Resources: Resource: - '*' - Action: + - autoscaling:CancelInstanceRefresh - autoscaling:CreateAutoScalingGroup - autoscaling:UpdateAutoScalingGroup - autoscaling:CreateOrUpdateTags diff --git a/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_extra_statements.yaml b/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_extra_statements.yaml index ac53343201..35e1d6e91f 100644 --- a/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_extra_statements.yaml +++ b/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_extra_statements.yaml @@ -261,6 +261,7 @@ Resources: Resource: - '*' - Action: + - autoscaling:CancelInstanceRefresh - autoscaling:CreateAutoScalingGroup - autoscaling:UpdateAutoScalingGroup - autoscaling:CreateOrUpdateTags diff --git a/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_s3_bucket.yaml b/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_s3_bucket.yaml index 29ab92adee..fb91d49841 100644 --- a/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_s3_bucket.yaml +++ b/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_s3_bucket.yaml @@ -255,6 +255,7 @@ Resources: Resource: - '*' - Action: + - autoscaling:CancelInstanceRefresh - autoscaling:CreateAutoScalingGroup - autoscaling:UpdateAutoScalingGroup - autoscaling:CreateOrUpdateTags diff --git a/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_ssm_secret_backend.yaml b/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_ssm_secret_backend.yaml index b282be4ad6..875d58b568 100644 --- a/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_ssm_secret_backend.yaml +++ b/cmd/clusterawsadm/cloudformation/bootstrap/fixtures/with_ssm_secret_backend.yaml @@ -255,6 +255,7 @@ Resources: Resource: - '*' - Action: + - autoscaling:CancelInstanceRefresh - autoscaling:CreateAutoScalingGroup - autoscaling:UpdateAutoScalingGroup - 
autoscaling:CreateOrUpdateTags diff --git a/cmd/clusterawsadm/gc/gc.go b/cmd/clusterawsadm/gc/gc.go index dac5a1f004..24b8779390 100644 --- a/cmd/clusterawsadm/gc/gc.go +++ b/cmd/clusterawsadm/gc/gc.go @@ -22,6 +22,7 @@ import ( "fmt" "strings" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" _ "k8s.io/client-go/plugin/pkg/client/auth/exec" // import all auth plugins @@ -32,7 +33,7 @@ import ( infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/annotations" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/cluster-api/controllers/external" "sigs.k8s.io/cluster-api/util/patch" ) @@ -45,6 +46,7 @@ func init() { _ = clusterv1.AddToScheme(scheme) _ = infrav1.AddToScheme(scheme) _ = ekscontrolplanev1.AddToScheme(scheme) + _ = apiextensionsv1.AddToScheme(scheme) } // CmdProcessor handles the garbage collection commands. @@ -187,9 +189,9 @@ func (c *CmdProcessor) getInfraCluster(ctx context.Context) (*unstructured.Unstr } ref := cluster.Spec.InfrastructureRef - obj, err := external.Get(ctx, c.client, ref) + obj, err := external.GetObjectFromContractVersionedRef(ctx, c.client, ref, c.namespace) if err != nil { - return nil, fmt.Errorf("getting infra cluster %s/%s: %w", ref.Namespace, ref.Name, err) + return nil, fmt.Errorf("getting infra cluster %s/%s: %w", c.namespace, ref.Name, err) } return obj, nil diff --git a/cmd/clusterawsadm/gc/gc_test.go b/cmd/clusterawsadm/gc/gc_test.go index 8e890579aa..a4449d9723 100644 --- a/cmd/clusterawsadm/gc/gc_test.go +++ b/cmd/clusterawsadm/gc/gc_test.go @@ -22,7 +22,7 @@ import ( "testing" . 
"github.com/onsi/gomega" - corev1 "k8s.io/api/core/v1" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "sigs.k8s.io/controller-runtime/pkg/client" @@ -31,7 +31,7 @@ import ( infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/annotations" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/cluster-api/controllers/external" ) @@ -105,7 +105,7 @@ func TestEnableGC(t *testing.T) { cluster := tc.existingObjs[0].(*clusterv1.Cluster) ref := cluster.Spec.InfrastructureRef - obj, err := external.Get(ctx, fake, ref) + obj, err := external.GetObjectFromContractVersionedRef(ctx, fake, ref, cluster.Namespace) g.Expect(err).NotTo(HaveOccurred()) g.Expect(obj).NotTo(BeNil()) @@ -176,7 +176,7 @@ func TestDisableGC(t *testing.T) { cluster := tc.existingObjs[0].(*clusterv1.Cluster) ref := cluster.Spec.InfrastructureRef - obj, err := external.Get(ctx, fake, ref) + obj, err := external.GetObjectFromContractVersionedRef(ctx, fake, ref, cluster.Namespace) g.Expect(err).NotTo(HaveOccurred()) g.Expect(obj).NotTo(BeNil()) @@ -271,7 +271,7 @@ func TestConfigureGC(t *testing.T) { cluster := tc.existingObjs[0].(*clusterv1.Cluster) ref := cluster.Spec.InfrastructureRef - obj, err := external.Get(ctx, fake, ref) + obj, err := external.GetObjectFromContractVersionedRef(ctx, fake, ref, cluster.Namespace) g.Expect(err).NotTo(HaveOccurred()) g.Expect(obj).NotTo(BeNil()) @@ -289,7 +289,66 @@ func TestConfigureGC(t *testing.T) { } func newFakeClient(scheme *runtime.Scheme, objs ...client.Object) client.Client { - return fake.NewClientBuilder().WithScheme(scheme).WithObjects(objs...).Build() + // Add CRDs to the fake client so external.GetObjectFromContractVersionedRef can find them + crds := []client.Object{ + &apiextensionsv1.CustomResourceDefinition{ + ObjectMeta: metav1.ObjectMeta{ + Name: "awsmanagedcontrolplanes.controlplane.cluster.x-k8s.io", + Labels: map[string]string{ + "cluster.x-k8s.io/v1beta1": "v1beta2", + }, + }, + Spec: apiextensionsv1.CustomResourceDefinitionSpec{ + Group: ekscontrolplanev1.GroupVersion.Group, + Names: apiextensionsv1.CustomResourceDefinitionNames{ + Kind: "AWSManagedControlPlane", + Plural: "awsmanagedcontrolplanes", + }, + Versions: []apiextensionsv1.CustomResourceDefinitionVersion{ + { + Name: "v1beta2", + Served: true, + Storage: true, + Schema: &apiextensionsv1.CustomResourceValidation{ + OpenAPIV3Schema: &apiextensionsv1.JSONSchemaProps{ + Type: "object", + }, + }, + }, + }, + }, + }, + &apiextensionsv1.CustomResourceDefinition{ + ObjectMeta: metav1.ObjectMeta{ + Name: "awsclusters.infrastructure.cluster.x-k8s.io", + Labels: map[string]string{ + "cluster.x-k8s.io/v1beta1": "v1beta2", + }, + }, + Spec: apiextensionsv1.CustomResourceDefinitionSpec{ + Group: infrav1.GroupVersion.Group, + Names: apiextensionsv1.CustomResourceDefinitionNames{ + Kind: "AWSCluster", + Plural: "awsclusters", + }, + Versions: []apiextensionsv1.CustomResourceDefinitionVersion{ + { + Name: "v1beta2", + Served: true, + Storage: true, + Schema: &apiextensionsv1.CustomResourceValidation{ + OpenAPIV3Schema: &apiextensionsv1.JSONSchemaProps{ + Type: "object", + }, + }, + }, + }, + }, + }, + } + + allObjs := append(crds, objs...) 
+ return fake.NewClientBuilder().WithScheme(scheme).WithObjects(allObjs...).Build() } func newManagedCluster(name string, excludeInfra bool) []client.Object { @@ -304,11 +363,10 @@ func newManagedCluster(name string, excludeInfra bool) []client.Object { Namespace: "default", }, Spec: clusterv1.ClusterSpec{ - InfrastructureRef: &corev1.ObjectReference{ - Name: name, - Namespace: "default", - Kind: "AWSManagedControlPlane", - APIVersion: ekscontrolplanev1.GroupVersion.String(), + InfrastructureRef: clusterv1.ContractVersionedObjectReference{ + Name: name, + Kind: "AWSManagedControlPlane", + APIGroup: ekscontrolplanev1.GroupVersion.Group, }, }, }, @@ -351,11 +409,10 @@ func newUnManagedCluster(name string, excludeInfra bool) []client.Object { Namespace: "default", }, Spec: clusterv1.ClusterSpec{ - InfrastructureRef: &corev1.ObjectReference{ - Name: name, - Namespace: "default", - Kind: "AWSCluster", - APIVersion: infrav1.GroupVersion.String(), + InfrastructureRef: clusterv1.ContractVersionedObjectReference{ + Name: name, + Kind: "AWSCluster", + APIGroup: infrav1.GroupVersion.Group, }, }, }, diff --git a/config/crd/bases/controlplane.cluster.x-k8s.io_awsmanagedcontrolplanes.yaml b/config/crd/bases/controlplane.cluster.x-k8s.io_awsmanagedcontrolplanes.yaml index 937de1cc32..662022c257 100644 --- a/config/crd/bases/controlplane.cluster.x-k8s.io_awsmanagedcontrolplanes.yaml +++ b/config/crd/bases/controlplane.cluster.x-k8s.io_awsmanagedcontrolplanes.yaml @@ -1262,6 +1262,17 @@ spec: - AMDEncryptedVirtualizationNestedPaging type: string type: object + dynamicHostAllocation: + description: |- + DynamicHostAllocation enables automatic allocation of dedicated hosts. + This field is mutually exclusive with HostID. + properties: + tags: + additionalProperties: + type: string + description: Tags to apply to the allocated dedicated host. + type: object + type: object ebsOptimized: description: Indicates whether the instance is optimized for Amazon EBS I/O. @@ -2270,6 +2281,28 @@ spec: description: AWSManagedControlPlaneSpec defines the desired state of an Amazon EKS Cluster. properties: + accessConfig: + description: AccessConfig specifies the access configuration information + for the cluster + properties: + authenticationMode: + default: config_map + description: |- + AuthenticationMode specifies the desired authentication mode for the cluster + Defaults to config_map + enum: + - config_map + - api + - api_and_config_map + type: string + bootstrapClusterCreatorAdminPermissions: + default: true + description: |- + BootstrapClusterCreatorAdminPermissions grants cluster admin permissions + to the IAM identity creating the cluster. Only applied during creation, + ignored when updating existing clusters. Defaults to true. + type: boolean + type: object additionalTags: additionalProperties: type: string @@ -3078,7 +3111,7 @@ spec: type: object oidcIdentityProviderConfig: description: |- - IdentityProviderconfig is used to specify the oidc provider config + OIDCIdentityProviderConfig is used to specify the OIDC provider config to be attached with this eks cluster properties: clientId: @@ -3221,6 +3254,17 @@ spec: - iam-authenticator - aws-cli type: string + upgradePolicy: + description: |- + The cluster upgrade policy to use for the cluster. + (Official AWS docs for this policy: https://docs.aws.amazon.com/eks/latest/userguide/view-upgrade-policy.html) + `extended` upgrade policy indicates that the cluster will enter into extended support once the Kubernetes version reaches end of standard support. 
You will incur extended support charges with this setting. You can upgrade your cluster to a standard supported Kubernetes version to stop incurring extended support charges. + `standard` upgrade policy indicates that the cluster is eligible for automatic upgrade at the end of standard support. You will not incur extended support charges with this setting but your EKS cluster will automatically upgrade to the next supported Kubernetes version in standard support. + If omitted, new clusters will use the AWS default upgrade policy (which at the time of writing is "extended") and existing clusters will have their upgrade policy unchanged. + enum: + - extended + - standard + type: string version: description: |- Version defines the desired Kubernetes version. If no version number @@ -3506,6 +3550,17 @@ spec: - AMDEncryptedVirtualizationNestedPaging type: string type: object + dynamicHostAllocation: + description: |- + DynamicHostAllocation enables automatic allocation of dedicated hosts. + This field is mutually exclusive with HostID. + properties: + tags: + additionalProperties: + type: string + description: Tags to apply to the allocated dedicated host. + type: object + type: object ebsOptimized: description: Indicates whether the instance is optimized for Amazon EBS I/O. diff --git a/config/crd/bases/controlplane.cluster.x-k8s.io_awsmanagedcontrolplanetemplates.yaml b/config/crd/bases/controlplane.cluster.x-k8s.io_awsmanagedcontrolplanetemplates.yaml index 7a0abb3cf8..e0a9b811ce 100644 --- a/config/crd/bases/controlplane.cluster.x-k8s.io_awsmanagedcontrolplanetemplates.yaml +++ b/config/crd/bases/controlplane.cluster.x-k8s.io_awsmanagedcontrolplanetemplates.yaml @@ -53,6 +53,28 @@ spec: description: AWSManagedControlPlaneSpec defines the desired state of an Amazon EKS Cluster. properties: + accessConfig: + description: AccessConfig specifies the access configuration + information for the cluster + properties: + authenticationMode: + default: config_map + description: |- + AuthenticationMode specifies the desired authentication mode for the cluster + Defaults to config_map + enum: + - config_map + - api + - api_and_config_map + type: string + bootstrapClusterCreatorAdminPermissions: + default: true + description: |- + BootstrapClusterCreatorAdminPermissions grants cluster admin permissions + to the IAM identity creating the cluster. Only applied during creation, + ignored when updating existing clusters. Defaults to true. + type: boolean + type: object additionalTags: additionalProperties: type: string @@ -878,7 +900,7 @@ spec: type: object oidcIdentityProviderConfig: description: |- - IdentityProviderconfig is used to specify the oidc provider config + OIDCIdentityProviderConfig is used to specify the OIDC provider config to be attached with this eks cluster properties: clientId: @@ -1022,6 +1044,17 @@ spec: - iam-authenticator - aws-cli type: string + upgradePolicy: + description: |- + The cluster upgrade policy to use for the cluster. + (Official AWS docs for this policy: https://docs.aws.amazon.com/eks/latest/userguide/view-upgrade-policy.html) + `extended` upgrade policy indicates that the cluster will enter into extended support once the Kubernetes version reaches end of standard support. You will incur extended support charges with this setting. You can upgrade your cluster to a standard supported Kubernetes version to stop incurring extended support charges. + `standard` upgrade policy indicates that the cluster is eligible for automatic upgrade at the end of standard support. 
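Aside: pulling the new AWSManagedControlPlane fields together, the accessConfig and upgradePolicy additions documented in the CRD hunks above would be set roughly as in this minimal sketch (name and region are hypothetical; enum values and defaults are the ones listed above):

```yaml
apiVersion: controlplane.cluster.x-k8s.io/v1beta2
kind: AWSManagedControlPlane
metadata:
  name: example-eks        # hypothetical
  namespace: default
spec:
  region: us-west-2        # hypothetical
  accessConfig:
    authenticationMode: api_and_config_map          # config_map | api | api_and_config_map
    bootstrapClusterCreatorAdminPermissions: true   # only applied at cluster creation
  upgradePolicy: standard  # extended | standard; omit to keep the AWS default
```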
You will not incur extended support charges with this setting but your EKS cluster will automatically upgrade to the next supported Kubernetes version in standard support. + If omitted, new clusters will use the AWS default upgrade policy (which at the time of writing is "extended") and existing clusters will have their upgrade policy unchanged. + enum: + - extended + - standard + type: string version: description: |- Version defines the desired Kubernetes version. If no version number diff --git a/config/crd/bases/controlplane.cluster.x-k8s.io_rosacontrolplanes.yaml b/config/crd/bases/controlplane.cluster.x-k8s.io_rosacontrolplanes.yaml index e0c2f57080..1f49d9548e 100644 --- a/config/crd/bases/controlplane.cluster.x-k8s.io_rosacontrolplanes.yaml +++ b/config/crd/bases/controlplane.cluster.x-k8s.io_rosacontrolplanes.yaml @@ -63,6 +63,25 @@ spec: AuditLogRoleARN defines the role that is used to forward audit logs to AWS CloudWatch. If not set, audit log forwarding is disabled. type: string + autoNode: + description: autoNode sets the autoNode mode and roleARN. + properties: + mode: + default: Disabled + description: mode specifies the mode for AutoNode. Setting + Enabled/Disabled mode allows/disallows Karpenter AutoNode + scaling. + enum: + - Enabled + - Disabled + type: string + roleARN: + description: |- + roleARN sets the autoNode role ARN, which includes the IAM policy and cluster-specific role that grant the necessary permissions to the Karpenter controller. + The role must be attached with the same OIDC-ID that is used with the ROSA-HCP cluster. + maxLength: 2048 + type: string + type: object availabilityZones: description: |- AvailabilityZones describe AWS AvailabilityZones of the worker nodes. @@ -525,8 +544,9 @@ spec: - name type: object installerRoleARN: - description: InstallerRoleARN is an AWS IAM role that OpenShift Cluster - Manager will assume to create the cluster.. + description: |- + InstallerRoleARN is an AWS IAM role that OpenShift Cluster Manager will assume to create the cluster. + Required if RosaRoleConfigRef is not specified. type: string network: description: Network config for the ROSA HCP cluster. @@ -560,7 +580,9 @@ spec: type: string type: object oidcID: - description: The ID of the internal OpenID Connect Provider. + description: |- + The ID of the internal OpenID Connect Provider. + Required if RosaRoleConfigRef is not specified. type: string x-kubernetes-validations: - message: oidcID is immutable @@ -576,8 +598,9 @@ spec: description: The AWS Region the cluster lives in. type: string rolesRef: - description: AWS IAM roles used to perform credential requests by - the openshift operators. + description: |- + AWS IAM roles used to perform credential requests by the openshift operators. + Required if RosaRoleConfigRef is not specified. properties: controlPlaneOperatorARN: description: "ControlPlaneOperatorARN is an ARN value referencing @@ -777,6 +800,38 @@ spec: x-kubernetes-validations: - message: rosaClusterName is immutable rule: self == oldSelf + rosaNetworkRef: + description: |- + ROSANetworkRef references the ROSANetwork custom resource that contains the networking infrastructure + for the ROSA HCP cluster. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong.
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + rosaRoleConfigRef: + description: |- + RosaRoleConfigRef is a reference to a RosaRoleConfig resource that contains account roles, operator roles and OIDC configuration. + RosaRoleConfigRef and role fields such as installerRoleARN, supportRoleARN, workerRoleARN, rolesRef and oidcID are mutually exclusive. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic subnets: description: |- The Subnet IDs to use when installing the cluster. @@ -788,6 +843,7 @@ spec: description: |- SupportRoleARN is an AWS IAM role used by Red Hat SREs to enable access to the cluster account in order to provide support. + Required if RosaRoleConfigRef is not specified. type: string version: description: OpenShift semantic version, for example "4.14.5". @@ -806,22 +862,16 @@ spec: - AlwaysAcknowledge type: string workerRoleARN: - description: WorkerRoleARN is an AWS IAM role that will be attached - to worker instances. + description: |- + WorkerRoleARN is an AWS IAM role that will be attached to worker instances. + Required if RosaRoleConfigRef is not specified. type: string required: - - availabilityZones - channelGroup - - installerRoleARN - - oidcID - region - - rolesRef - rosaClusterName - - subnets - - supportRoleARN - version - versionGate - - workerRoleARN type: object status: description: RosaControlPlaneStatus defines the observed state of ROSAControlPlane. diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_awsclusters.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_awsclusters.yaml index 83416aa9ae..869454a917 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_awsclusters.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_awsclusters.yaml @@ -2240,6 +2240,17 @@ spec: - AMDEncryptedVirtualizationNestedPaging type: string type: object + dynamicHostAllocation: + description: |- + DynamicHostAllocation enables automatic allocation of dedicated hosts. + This field is mutually exclusive with HostID. + properties: + tags: + additionalProperties: + type: string + description: Tags to apply to the allocated dedicated host. + type: object + type: object ebsOptimized: description: Indicates whether the instance is optimized for Amazon EBS I/O. diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_awsmachines.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_awsmachines.yaml index d7aa2cfef6..81cfd85073 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_awsmachines.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_awsmachines.yaml @@ -80,6 +80,7 @@ spec: arn: description: |- ARN of resource. + Deprecated: This field has no function and is going to be removed in the next release. type: string filters: @@ -344,6 +345,7 @@ spec: arn: description: |- ARN of resource. + Deprecated: This field has no function and is going to be removed in the next release. 
type: string filters: @@ -717,6 +719,19 @@ spec: - AMDEncryptedVirtualizationNestedPaging type: string type: object + dynamicHostAllocation: + description: |- + DynamicHostAllocation enables automatic allocation of a single dedicated host. + This field is mutually exclusive with HostID and always allocates exactly one host. + Cost effectiveness of allocating a single instance on a dedicated host may vary + depending on the instance type and the region. + properties: + tags: + additionalProperties: + type: string + description: Tags to apply to the allocated dedicated host. + type: object + type: object elasticIpPool: description: ElasticIPPool is the configuration to allocate Public IPv4 address (Elastic IP/EIP) from user-defined pool. @@ -747,18 +762,22 @@ spec: rule: self in ['none','amazon-pool'] type: object hostAffinity: + default: host description: |- HostAffinity specifies the dedicated host affinity setting for the instance. - When hostAffinity is set to host, an instance started onto a specific host always restarts on the same host if stopped. - When hostAffinity is set to default, and you stop and restart the instance, it can be restarted on any available host. + When HostAffinity is set to host, an instance started onto a specific host always restarts on the same host if stopped. + When HostAffinity is set to default, and you stop and restart the instance, it can be restarted on any available host. When HostAffinity is defined, HostID is required. enum: - default - host type: string hostID: - description: HostID specifies the Dedicated Host on which the instance - must be started. + description: |- + HostID specifies the Dedicated Host on which the instance must be started. + This field is mutually exclusive with DynamicHostAllocation. + maxLength: 19 + pattern: ^h-[0-9a-f]{17}$ type: string iamInstanceProfile: description: IAMInstanceProfile is a name of an IAM instance profile @@ -1163,8 +1182,12 @@ spec: type: string type: object tenancy: - description: Tenancy indicates if instance should run on shared or - single-tenant hardware. + description: |- + Tenancy indicates if instance should run on shared or single-tenant hardware. + When Tenancy=host, AWS will attempt to find a suitable host from: + - Preexisting allocated hosts that have auto-placement enabled + - A specific host ID, if configured + - Allocating a new dedicated host if DynamicHostAllocation is configured enum: - default - dedicated @@ -1267,6 +1290,17 @@ spec: - type type: object type: array + dedicatedHost: + description: |- + DedicatedHost tracks the dynamically allocated dedicated host. + This field is populated when DynamicHostAllocation is used. + properties: + id: + description: |- + ID tracks the dynamically allocated dedicated host ID. + This field is populated when DynamicHostAllocation is used. + type: string + type: object failureMessage: description: |- FailureMessage will be set in the event that there is a terminal problem diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_awsmachinetemplates.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_awsmachinetemplates.yaml index 5e3f55519d..ac7689c7de 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_awsmachinetemplates.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_awsmachinetemplates.yaml @@ -91,6 +91,7 @@ spec: arn: description: |- ARN of resource. + Deprecated: This field has no function and is going to be removed in the next release. 
type: string filters: @@ -360,6 +361,7 @@ spec: arn: description: |- ARN of resource. + Deprecated: This field has no function and is going to be removed in the next release. type: string filters: @@ -636,6 +638,20 @@ spec: - AMDEncryptedVirtualizationNestedPaging type: string type: object + dynamicHostAllocation: + description: |- + DynamicHostAllocation enables automatic allocation of a single dedicated host. + This field is mutually exclusive with HostID and always allocates exactly one host. + Cost effectiveness of allocating a single instance on a dedicated host may vary + depending on the instance type and the region. + properties: + tags: + additionalProperties: + type: string + description: Tags to apply to the allocated dedicated + host. + type: object + type: object elasticIpPool: description: ElasticIPPool is the configuration to allocate Public IPv4 address (Elastic IP/EIP) from user-defined pool. @@ -666,18 +682,22 @@ spec: rule: self in ['none','amazon-pool'] type: object hostAffinity: + default: host description: |- HostAffinity specifies the dedicated host affinity setting for the instance. - When hostAffinity is set to host, an instance started onto a specific host always restarts on the same host if stopped. - When hostAffinity is set to default, and you stop and restart the instance, it can be restarted on any available host. + When HostAffinity is set to host, an instance started onto a specific host always restarts on the same host if stopped. + When HostAffinity is set to default, and you stop and restart the instance, it can be restarted on any available host. When HostAffinity is defined, HostID is required. enum: - default - host type: string hostID: - description: HostID specifies the Dedicated Host on which - the instance must be started. + description: |- + HostID specifies the Dedicated Host on which the instance must be started. + This field is mutually exclusive with DynamicHostAllocation. + maxLength: 19 + pattern: ^h-[0-9a-f]{17}$ type: string iamInstanceProfile: description: IAMInstanceProfile is a name of an IAM instance @@ -1089,8 +1109,12 @@ spec: type: string type: object tenancy: - description: Tenancy indicates if instance should run on shared - or single-tenant hardware. + description: |- + Tenancy indicates if instance should run on shared or single-tenant hardware. + When Tenancy=host, AWS will attempt to find a suitable host from: + - Preexisting allocated hosts that have auto-placement enabled + - A specific host ID, if configured + - Allocating a new dedicated host if DynamicHostAllocation is configured enum: - default - dedicated @@ -1134,7 +1158,84 @@ spec: This value is used for autoscaling from zero operations as defined in: https://github.com/kubernetes-sigs/cluster-api/blob/main/docs/proposals/20210310-opt-in-autoscaling-from-zero.md type: object + conditions: + description: Conditions defines current service state of the AWSMachineTemplate. + items: + description: Condition defines an observation of a Cluster API resource + operational state. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when + the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This field may be empty. 
+ maxLength: 10240 + minLength: 1 + type: string + reason: + description: |- + reason is the reason for the condition's last transition in CamelCase. + The specific API may choose whether or not this field is considered a guaranteed API. + This field may be empty. + maxLength: 256 + minLength: 1 + type: string + severity: + description: |- + severity provides an explicit classification of Reason code, so the users or machines can immediately + understand the current situation and act accordingly. + The Severity field MUST be set only when Status=False. + maxLength: 32 + type: string + status: + description: status of the condition, one of True, False, Unknown. + type: string + type: + description: |- + type of condition in CamelCase or in foo.example.com/CamelCase. + Many .condition.type values are consistent across resources like Available, but because arbitrary conditions + can be useful (see .node.status.conditions), the ability to deconflict is important. + maxLength: 256 + minLength: 1 + type: string + required: + - lastTransitionTime + - status + - type + type: object + type: array + nodeInfo: + description: |- + NodeInfo contains information about the node's architecture and operating system. + This value is used for autoscaling from zero operations as defined in: + https://github.com/kubernetes-sigs/cluster-api/blob/main/docs/proposals/20210310-opt-in-autoscaling-from-zero.md + properties: + architecture: + description: |- + Architecture is the CPU architecture of the node. + Its underlying type is a string and its value can be any of amd64, arm64. + enum: + - amd64 + - arm64 + type: string + operatingSystem: + description: |- + OperatingSystem is the operating system of the node. + Its underlying type is a string and its value can be any of linux, windows. + enum: + - linux + - windows + type: string + type: object type: object type: object served: true storage: true + subresources: + status: {} diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_awsmanagedmachinepools.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_awsmanagedmachinepools.yaml index 11fdfa422c..92927218c1 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_awsmanagedmachinepools.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_awsmanagedmachinepools.yaml @@ -987,6 +987,17 @@ spec: - name type: object type: array + nodeRepairConfig: + description: NodeRepairConfig specifies the node auto repair configuration + for the managed node group. + properties: + enabled: + default: false + description: |- + Enabled specifies whether node auto repair is enabled for the node group. + When enabled, EKS will automatically repair unhealthy nodes by replacing them. + type: boolean + type: object providerIDList: description: |- ProviderIDList are the provider IDs of instances in the diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_rosamachinepools.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_rosamachinepools.yaml index e6a27a9ddf..6605730aef 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_rosamachinepools.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_rosamachinepools.yaml @@ -88,6 +88,11 @@ spec: AvailabilityZone is an optional field specifying the availability zone where instances of this machine pool should run. For Multi-AZ clusters, you can create a machine pool in a Single-AZ of your choice.
type: string + capacityReservationID: + description: |- + CapacityReservationID specifies the ID of an AWS On-Demand Capacity Reservation and Capacity Blocks for ML. + The CapacityReservationID must be created in advance, before creating a NodePool. + type: string instanceType: description: InstanceType specifies the AWS instance type type: string diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_rosanetworks.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_rosanetworks.yaml new file mode 100644 index 0000000000..26c6852f8b --- /dev/null +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_rosanetworks.yaml @@ -0,0 +1,231 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.19.0 + name: rosanetworks.infrastructure.cluster.x-k8s.io +spec: + group: infrastructure.cluster.x-k8s.io + names: + categories: + - cluster-api + kind: ROSANetwork + listKind: ROSANetworkList + plural: rosanetworks + shortNames: + - rosanet + singular: rosanetwork + scope: Namespaced + versions: + - name: v1beta2 + schema: + openAPIV3Schema: + description: ROSANetwork is the schema for the rosanetworks API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ROSANetworkSpec defines the desired state of ROSANetwork + properties: + availabilityZoneCount: + description: |- + The number of availability zones to be used for creation of the network infrastructure. + You can specify anything between one and four, depending on the chosen AWS region. + Either AvailabilityZoneCount OR AvailabilityZones must be set. + minimum: 1 + type: integer + x-kubernetes-validations: + - message: availabilityZoneCount is immutable + rule: self == oldSelf + availabilityZones: + description: |- + The list of availability zones to be used for creation of the network infrastructure. + You can specify anything between one and four valid availability zones from a given region. + Either AvailabilityZones OR AvailabilityZoneCount must be set. + items: + type: string + type: array + x-kubernetes-validations: + - message: availabilityZones is immutable + rule: self == oldSelf + cidrBlock: + description: CIDR block to be used for the VPC + format: cidr + type: string + identityRef: + description: |- + IdentityRef is a reference to an identity to be used when reconciling the ROSA network. + If no identity is specified, the default identity for this controller will be used. + properties: + kind: + description: Kind of the identity. + enum: + - AWSClusterControllerIdentity + - AWSClusterRoleIdentity + - AWSClusterStaticIdentity + type: string + name: + description: Name of the identity.
+ minLength: 1 + type: string + required: + - kind + - name + type: object + region: + description: The AWS region in which the components of ROSA network + infrastructure are to be created + type: string + x-kubernetes-validations: + - message: region is immutable + rule: self == oldSelf + stackName: + description: The name of the cloudformation stack under which the + network infrastructure will be created + type: string + x-kubernetes-validations: + - message: stackName is immutable + rule: self == oldSelf + stackTags: + additionalProperties: + type: string + description: |- + StackTags is an optional set of tags to add to the created cloudformation stack. + The stack tags will then be automatically applied to the supported AWS resources (VPC, subnets, ...). + type: object + required: + - cidrBlock + - region + - stackName + type: object + status: + description: ROSANetworkStatus defines the observed state of ROSANetwork + properties: + conditions: + description: Conditions specifies the conditions for ROSANetwork + items: + description: Condition defines an observation of a Cluster API resource + operational state. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when + the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This field may be empty. + maxLength: 10240 + minLength: 1 + type: string + reason: + description: |- + reason is the reason for the condition's last transition in CamelCase. + The specific API may choose whether or not this field is considered a guaranteed API. + This field may be empty. + maxLength: 256 + minLength: 1 + type: string + severity: + description: |- + severity provides an explicit classification of Reason code, so the users or machines can immediately + understand the current situation and act accordingly. + The Severity field MUST be set only when Status=False. + maxLength: 32 + type: string + status: + description: status of the condition, one of True, False, Unknown. + type: string + type: + description: |- + type of condition in CamelCase or in foo.example.com/CamelCase. + Many .condition.type values are consistent across resources like Available, but because arbitrary conditions + can be useful (see .node.status.conditions), the ability to deconflict is important. + maxLength: 256 + minLength: 1 + type: string + required: + - lastTransitionTime + - status + - type + type: object + type: array + resources: + description: Resources created in the cloudformation stack + items: + description: CFResource groups information pertaining to a resource + created as a part of a cloudformation stack + properties: + logicalId: + description: LogicalResourceID of the created resource. + type: string + physicalId: + description: PhysicalResourceID of the created resource. + type: string + reason: + description: Message pertaining to the status of the resource + type: string + resource: + description: 'Type of the created resource: AWS::EC2::VPC, AWS::EC2::Subnet, + ...' + type: string + status: + description: 'Status of the resource: CREATE_IN_PROGRESS, CREATE_COMPLETE, + ...'
+ type: string + required: + - logicalId + - physicalId + - reason + - resource + - status + type: object + type: array + subnets: + description: Array of created private and public subnets and availability + zones, grouped by availability zone + items: + description: ROSANetworkSubnet groups the public and private subnets + and the availability zone in which the two subnets were created + properties: + availabilityZone: + description: Availability zone of the subnet pair, for example + us-west-2a + type: string + privateSubnet: + description: ID of the private subnet, for example subnet-07a20d6c41af2b725 + type: string + publicSubnet: + description: ID of the public subnet, for example subnet-0f7e49a3ce68ff338 + type: string + required: + - availabilityZone + - privateSubnet + - publicSubnet + type: object + type: array + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_rosaroleconfigs.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_rosaroleconfigs.yaml new file mode 100644 index 0000000000..6fc3252abc --- /dev/null +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_rosaroleconfigs.yaml @@ -0,0 +1,458 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.19.0 + name: rosaroleconfigs.infrastructure.cluster.x-k8s.io +spec: + group: infrastructure.cluster.x-k8s.io + names: + categories: + - cluster-api + kind: ROSARoleConfig + listKind: ROSARoleConfigList + plural: rosaroleconfigs + shortNames: + - rosarole + singular: rosaroleconfig + scope: Namespaced + versions: + - name: v1beta2 + schema: + openAPIV3Schema: + description: ROSARoleConfig is the Schema for the rosaroleconfigs API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: ROSARoleConfigSpec defines the desired state of ROSARoleConfig + properties: + accountRoleConfig: + description: AccountRoleConfig defines account-wide IAM roles before + creating your ROSA cluster. + properties: + path: + description: The ARN path for the account/operator roles as well + as their policies. + type: string + permissionsBoundaryARN: + description: The ARN of the policy that is used to set the permissions + boundary for the account roles. + type: string + prefix: + description: User-defined prefix for all generated AWS account + roles + maxLength: 4 + pattern: ^[a-z]([-a-z0-9]*[a-z0-9])?$ + type: string + x-kubernetes-validations: + - message: prefix is immutable + rule: self == oldSelf + sharedVPCConfig: + description: SharedVPCConfig is used to set up a shared VPC.
+ properties: + routeRoleARN: + description: Role ARN associated with the private hosted zone + used for Hosted Control Plane cluster shared VPC; this role + contains policies to be used with Route 53 + type: string + vpcEndpointRoleArn: + description: Role ARN associated with the shared VPC used + for Hosted Control Plane clusters; this role contains policies + to be used with the VPC endpoint + type: string + type: object + version: + description: |- + Version of OpenShift that will be used for the roles tag, in the format x.y.z, for example "4.19.0". + Setting the role OpenShift version tag does not affect the associated ROSAControlPlane version. + type: string + x-kubernetes-validations: + - message: version is immutable + rule: self == oldSelf + required: + - prefix + - version + type: object + credentialsSecretRef: + description: CredentialsSecretRef references a secret with necessary + credentials to connect to the OCM API. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. Instances of this type with an empty value here are + almost certainly wrong. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + type: object + x-kubernetes-map-type: atomic + identityRef: + description: |- + IdentityRef is a reference to an identity to be used when reconciling the ROSA Role Config. + If no identity is specified, the default identity for this controller will be used. + properties: + kind: + description: Kind of the identity. + enum: + - AWSClusterControllerIdentity + - AWSClusterRoleIdentity + - AWSClusterStaticIdentity + type: string + name: + description: Name of the identity. + minLength: 1 + type: string + required: + - kind + - name + type: object + oidcProviderType: + default: Managed + description: OIDC provider type values are Managed or Unmanaged. When + set to Unmanaged, the OperatorRoleConfig OIDCID field must be provided. + enum: + - Managed + - Unmanaged + type: string + operatorRoleConfig: + description: OperatorRoleConfig defines cluster-specific operator + IAM roles based on your cluster configuration. + properties: + oidcID: + description: |- + OIDCID is the ID of the OIDC config that will be used to create the operator roles. + Cannot be set when OidcProviderType is set to Managed + type: string + x-kubernetes-validations: + - message: oidcID is immutable + rule: self == oldSelf + permissionsBoundaryARN: + description: The ARN of the policy that is used to set the permissions + boundary for the operator roles. + type: string + prefix: + description: 'User-defined prefix for generated AWS operator + roles.' + maxLength: 4 + pattern: ^[a-z]([-a-z0-9]*[a-z0-9])?$ + type: string + x-kubernetes-validations: + - message: prefix is immutable + rule: self == oldSelf + sharedVPCConfig: + description: SharedVPCConfig is used to set up a shared VPC.
+ properties: + routeRoleARN: + description: Role ARN associated with the private hosted zone + used for Hosted Control Plane cluster shared VPC; this role + contains policies to be used with Route 53 + type: string + vpcEndpointRoleArn: + description: Role ARN associated with the shared VPC used + for Hosted Control Plane clusters; this role contains policies + to be used with the VPC endpoint + type: string + type: object + required: + - prefix + type: object + required: + - accountRoleConfig + - oidcProviderType + - operatorRoleConfig + type: object + status: + description: ROSARoleConfigStatus defines the observed state of ROSARoleConfig + properties: + accountRolesRef: + description: Created account roles that can be used by the ROSA control + plane. + properties: + installerRoleARN: + description: InstallerRoleARN is an AWS IAM role that OpenShift + Cluster Manager will assume to create the cluster. + type: string + supportRoleARN: + description: |- + SupportRoleARN is an AWS IAM role used by Red Hat SREs to enable + access to the cluster account in order to provide support. + type: string + workerRoleARN: + description: WorkerRoleARN is an AWS IAM role that will be attached + to worker instances. + type: string + type: object + conditions: + description: Conditions specifies the ROSARoleConfig conditions + items: + description: Condition defines an observation of a Cluster API resource + operational state. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when + the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This field may be empty. + maxLength: 10240 + minLength: 1 + type: string + reason: + description: |- + reason is the reason for the condition's last transition in CamelCase. + The specific API may choose whether or not this field is considered a guaranteed API. + This field may be empty. + maxLength: 256 + minLength: 1 + type: string + severity: + description: |- + severity provides an explicit classification of Reason code, so the users or machines can immediately + understand the current situation and act accordingly. + The Severity field MUST be set only when Status=False. + maxLength: 32 + type: string + status: + description: status of the condition, one of True, False, Unknown. + type: string + type: + description: |- + type of condition in CamelCase or in foo.example.com/CamelCase. + Many .condition.type values are consistent across resources like Available, but because arbitrary conditions + can be useful (see .node.status.conditions), the ability to deconflict is important. + maxLength: 256 + minLength: 1 + type: string + required: + - lastTransitionTime + - status + - type + type: object + type: array + oidcID: + description: ID of the created OIDC config + type: string + oidcProviderARN: + description: ARN of the created OIDC provider for operators to authenticate + against in an STS cluster. + type: string + operatorRolesRef: + description: AWS IAM roles used to perform credential requests by + the openshift operators.
+ properties: + controlPlaneOperatorARN: + description: "ControlPlaneOperatorARN is an ARN value referencing + a role appropriate for the Control Plane Operator.\n\nThe following + is an example of a valid policy document:\n\n{\n\t\"Version\": + \"2012-10-17\",\n\t\"Statement\": [\n\t\t{\n\t\t\t\"Effect\": + \"Allow\",\n\t\t\t\"Action\": [\n\t\t\t\t\"ec2:CreateVpcEndpoint\",\n\t\t\t\t\"ec2:DescribeVpcEndpoints\",\n\t\t\t\t\"ec2:ModifyVpcEndpoint\",\n\t\t\t\t\"ec2:DeleteVpcEndpoints\",\n\t\t\t\t\"ec2:CreateTags\",\n\t\t\t\t\"route53:ListHostedZones\",\n\t\t\t\t\"ec2:CreateSecurityGroup\",\n\t\t\t\t\"ec2:AuthorizeSecurityGroupIngress\",\n\t\t\t\t\"ec2:AuthorizeSecurityGroupEgress\",\n\t\t\t\t\"ec2:DeleteSecurityGroup\",\n\t\t\t\t\"ec2:RevokeSecurityGroupIngress\",\n\t\t\t\t\"ec2:RevokeSecurityGroupEgress\",\n\t\t\t\t\"ec2:DescribeSecurityGroups\",\n\t\t\t\t\"ec2:DescribeVpcs\",\n\t\t\t],\n\t\t\t\"Resource\": + \"*\"\n\t\t},\n\t\t{\n\t\t\t\"Effect\": \"Allow\",\n\t\t\t\"Action\": + [\n\t\t\t\t\"route53:ChangeResourceRecordSets\",\n\t\t\t\t\"route53:ListResourceRecordSets\"\n\t\t\t],\n\t\t\t\"Resource\": + \"arn:aws:route53:::%s\"\n\t\t}\n\t]\n}" + type: string + imageRegistryARN: + description: "ImageRegistryARN is an ARN value referencing a role + appropriate for the Image Registry Operator.\n\nThe following + is an example of a valid policy document:\n\n{\n\t\"Version\": + \"2012-10-17\",\n\t\"Statement\": [\n\t\t{\n\t\t\t\"Effect\": + \"Allow\",\n\t\t\t\"Action\": [\n\t\t\t\t\"s3:CreateBucket\",\n\t\t\t\t\"s3:DeleteBucket\",\n\t\t\t\t\"s3:PutBucketTagging\",\n\t\t\t\t\"s3:GetBucketTagging\",\n\t\t\t\t\"s3:PutBucketPublicAccessBlock\",\n\t\t\t\t\"s3:GetBucketPublicAccessBlock\",\n\t\t\t\t\"s3:PutEncryptionConfiguration\",\n\t\t\t\t\"s3:GetEncryptionConfiguration\",\n\t\t\t\t\"s3:PutLifecycleConfiguration\",\n\t\t\t\t\"s3:GetLifecycleConfiguration\",\n\t\t\t\t\"s3:GetBucketLocation\",\n\t\t\t\t\"s3:ListBucket\",\n\t\t\t\t\"s3:GetObject\",\n\t\t\t\t\"s3:PutObject\",\n\t\t\t\t\"s3:DeleteObject\",\n\t\t\t\t\"s3:ListBucketMultipartUploads\",\n\t\t\t\t\"s3:AbortMultipartUpload\",\n\t\t\t\t\"s3:ListMultipartUploadParts\"\n\t\t\t],\n\t\t\t\"Resource\": + \"*\"\n\t\t}\n\t]\n}" + type: string + ingressARN: + description: "The referenced role must have a trust relationship + that allows it to be assumed via web identity.\nhttps://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_oidc.html.\nExample:\n{\n\t\t\"Version\": + \"2012-10-17\",\n\t\t\"Statement\": [\n\t\t\t{\n\t\t\t\t\"Effect\": + \"Allow\",\n\t\t\t\t\"Principal\": {\n\t\t\t\t\t\"Federated\": + \"{{ .ProviderARN }}\"\n\t\t\t\t},\n\t\t\t\t\t\"Action\": \"sts:AssumeRoleWithWebIdentity\",\n\t\t\t\t\"Condition\": + {\n\t\t\t\t\t\"StringEquals\": {\n\t\t\t\t\t\t\"{{ .ProviderName + }}:sub\": {{ .ServiceAccounts }}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t]\n\t}\n\nIngressARN + is an ARN value referencing a role appropriate for the Ingress + Operator.\n\nThe following is an example of a valid policy document:\n\n{\n\t\"Version\": + \"2012-10-17\",\n\t\"Statement\": [\n\t\t{\n\t\t\t\"Effect\": + \"Allow\",\n\t\t\t\"Action\": [\n\t\t\t\t\"elasticloadbalancing:DescribeLoadBalancers\",\n\t\t\t\t\"tag:GetResources\",\n\t\t\t\t\"route53:ListHostedZones\"\n\t\t\t],\n\t\t\t\"Resource\": + \"*\"\n\t\t},\n\t\t{\n\t\t\t\"Effect\": \"Allow\",\n\t\t\t\"Action\": + [\n\t\t\t\t\"route53:ChangeResourceRecordSets\"\n\t\t\t],\n\t\t\t\"Resource\": + 
[\n\t\t\t\t\"arn:aws:route53:::PUBLIC_ZONE_ID\",\n\t\t\t\t\"arn:aws:route53:::PRIVATE_ZONE_ID\"\n\t\t\t]\n\t\t}\n\t]\n}" + type: string + kmsProviderARN: + type: string + kubeCloudControllerARN: + description: |- + KubeCloudControllerARN is an ARN value referencing a role appropriate for the KCM/KCC. + Source: https://cloud-provider-aws.sigs.k8s.io/prerequisites/#iam-policies + + The following is an example of a valid policy document: + + { + "Version": "2012-10-17", + "Statement": [ + { + "Action": [ + "autoscaling:DescribeAutoScalingGroups", + "autoscaling:DescribeLaunchConfigurations", + "autoscaling:DescribeTags", + "ec2:DescribeAvailabilityZones", + "ec2:DescribeInstances", + "ec2:DescribeImages", + "ec2:DescribeRegions", + "ec2:DescribeRouteTables", + "ec2:DescribeSecurityGroups", + "ec2:DescribeSubnets", + "ec2:DescribeVolumes", + "ec2:CreateSecurityGroup", + "ec2:CreateTags", + "ec2:CreateVolume", + "ec2:ModifyInstanceAttribute", + "ec2:ModifyVolume", + "ec2:AttachVolume", + "ec2:AuthorizeSecurityGroupIngress", + "ec2:CreateRoute", + "ec2:DeleteRoute", + "ec2:DeleteSecurityGroup", + "ec2:DeleteVolume", + "ec2:DetachVolume", + "ec2:RevokeSecurityGroupIngress", + "ec2:DescribeVpcs", + "elasticloadbalancing:AddTags", + "elasticloadbalancing:AttachLoadBalancerToSubnets", + "elasticloadbalancing:ApplySecurityGroupsToLoadBalancer", + "elasticloadbalancing:CreateLoadBalancer", + "elasticloadbalancing:CreateLoadBalancerPolicy", + "elasticloadbalancing:CreateLoadBalancerListeners", + "elasticloadbalancing:ConfigureHealthCheck", + "elasticloadbalancing:DeleteLoadBalancer", + "elasticloadbalancing:DeleteLoadBalancerListeners", + "elasticloadbalancing:DescribeLoadBalancers", + "elasticloadbalancing:DescribeLoadBalancerAttributes", + "elasticloadbalancing:DetachLoadBalancerFromSubnets", + "elasticloadbalancing:DeregisterInstancesFromLoadBalancer", + "elasticloadbalancing:ModifyLoadBalancerAttributes", + "elasticloadbalancing:RegisterInstancesWithLoadBalancer", + "elasticloadbalancing:SetLoadBalancerPoliciesForBackendServer", + "elasticloadbalancing:AddTags", + "elasticloadbalancing:CreateListener", + "elasticloadbalancing:CreateTargetGroup", + "elasticloadbalancing:DeleteListener", + "elasticloadbalancing:DeleteTargetGroup", + "elasticloadbalancing:DeregisterTargets", + "elasticloadbalancing:DescribeListeners", + "elasticloadbalancing:DescribeLoadBalancerPolicies", + "elasticloadbalancing:DescribeTargetGroups", + "elasticloadbalancing:DescribeTargetHealth", + "elasticloadbalancing:ModifyListener", + "elasticloadbalancing:ModifyTargetGroup", + "elasticloadbalancing:RegisterTargets", + "elasticloadbalancing:SetLoadBalancerPoliciesOfListener", + "iam:CreateServiceLinkedRole", + "kms:DescribeKey" + ], + "Resource": [ + "*" + ], + "Effect": "Allow" + } + ] + } + type: string + networkARN: + description: "NetworkARN is an ARN value referencing a role appropriate + for the Network Operator.\n\nThe following is an example of + a valid policy document:\n\n{\n\t\"Version\": \"2012-10-17\",\n\t\"Statement\": + [\n\t\t{\n\t\t\t\"Effect\": \"Allow\",\n\t\t\t\"Action\": [\n\t\t\t\t\"ec2:DescribeInstances\",\n + \ \"ec2:DescribeInstanceStatus\",\n \"ec2:DescribeInstanceTypes\",\n + \ \"ec2:UnassignPrivateIpAddresses\",\n \"ec2:AssignPrivateIpAddresses\",\n + \ \"ec2:UnassignIpv6Addresses\",\n \"ec2:AssignIpv6Addresses\",\n + \ \"ec2:DescribeSubnets\",\n \"ec2:DescribeNetworkInterfaces\"\n\t\t\t],\n\t\t\t\"Resource\": + \"*\"\n\t\t}\n\t]\n}" + type: string + nodePoolManagementARN: + description: 
"NodePoolManagementARN is an ARN value referencing + a role appropriate for the CAPI Controller.\n\nThe following + is an example of a valid policy document:\n\n{\n \"Version\": + \"2012-10-17\",\n \"Statement\": [\n {\n \"Action\": [\n + \ \"ec2:AssociateRouteTable\",\n \"ec2:AttachInternetGateway\",\n + \ \"ec2:AuthorizeSecurityGroupIngress\",\n \"ec2:CreateInternetGateway\",\n + \ \"ec2:CreateNatGateway\",\n \"ec2:CreateRoute\",\n + \ \"ec2:CreateRouteTable\",\n \"ec2:CreateSecurityGroup\",\n + \ \"ec2:CreateSubnet\",\n \"ec2:CreateTags\",\n \"ec2:DeleteInternetGateway\",\n + \ \"ec2:DeleteNatGateway\",\n \"ec2:DeleteRouteTable\",\n + \ \"ec2:DeleteSecurityGroup\",\n \"ec2:DeleteSubnet\",\n + \ \"ec2:DeleteTags\",\n \"ec2:DescribeAccountAttributes\",\n + \ \"ec2:DescribeAddresses\",\n \"ec2:DescribeAvailabilityZones\",\n + \ \"ec2:DescribeImages\",\n \"ec2:DescribeInstances\",\n + \ \"ec2:DescribeInternetGateways\",\n \"ec2:DescribeNatGateways\",\n + \ \"ec2:DescribeNetworkInterfaces\",\n \"ec2:DescribeNetworkInterfaceAttribute\",\n + \ \"ec2:DescribeRouteTables\",\n \"ec2:DescribeSecurityGroups\",\n + \ \"ec2:DescribeSubnets\",\n \"ec2:DescribeVpcs\",\n + \ \"ec2:DescribeVpcAttribute\",\n \"ec2:DescribeVolumes\",\n + \ \"ec2:DetachInternetGateway\",\n \"ec2:DisassociateRouteTable\",\n + \ \"ec2:DisassociateAddress\",\n \"ec2:ModifyInstanceAttribute\",\n + \ \"ec2:ModifyNetworkInterfaceAttribute\",\n \"ec2:ModifySubnetAttribute\",\n + \ \"ec2:RevokeSecurityGroupIngress\",\n \"ec2:RunInstances\",\n + \ \"ec2:TerminateInstances\",\n \"tag:GetResources\",\n + \ \"ec2:CreateLaunchTemplate\",\n \"ec2:CreateLaunchTemplateVersion\",\n + \ \"ec2:DescribeLaunchTemplates\",\n \"ec2:DescribeLaunchTemplateVersions\",\n + \ \"ec2:DeleteLaunchTemplate\",\n \"ec2:DeleteLaunchTemplateVersions\"\n + \ ],\n \"Resource\": [\n \"*\"\n ],\n \"Effect\": + \"Allow\"\n },\n {\n \"Condition\": {\n \"StringLike\": + {\n \"iam:AWSServiceName\": \"elasticloadbalancing.amazonaws.com\"\n + \ }\n },\n \"Action\": [\n \"iam:CreateServiceLinkedRole\"\n + \ ],\n \"Resource\": [\n \"arn:*:iam::*:role/aws-service-role/elasticloadbalancing.amazonaws.com/AWSServiceRoleForElasticLoadBalancing\"\n + \ ],\n \"Effect\": \"Allow\"\n },\n {\n \"Action\": + [\n \"iam:PassRole\"\n ],\n \"Resource\": [\n + \ \"arn:*:iam::*:role/*-worker-role\"\n ],\n \"Effect\": + \"Allow\"\n },\n\t {\n\t \t\"Effect\": \"Allow\",\n\t \t\"Action\": + [\n\t \t\t\"kms:Decrypt\",\n\t \t\t\"kms:ReEncrypt\",\n\t + \ \t\t\"kms:GenerateDataKeyWithoutPlainText\",\n\t \t\t\"kms:DescribeKey\"\n\t + \ \t],\n\t \t\"Resource\": \"*\"\n\t },\n\t {\n\t \t\"Effect\": + \"Allow\",\n\t \t\"Action\": [\n\t \t\t\"kms:CreateGrant\"\n\t + \ \t],\n\t \t\"Resource\": \"*\",\n\t \t\"Condition\": {\n\t + \ \t\t\"Bool\": {\n\t \t\t\t\"kms:GrantIsForAWSResource\": + true\n\t \t\t}\n\t \t}\n\t }\n ]\n}" + type: string + storageARN: + description: "StorageARN is an ARN value referencing a role appropriate + for the Storage Operator.\n\nThe following is an example of + a valid policy document:\n\n{\n\t\"Version\": \"2012-10-17\",\n\t\"Statement\": + [\n\t\t{\n\t\t\t\"Effect\": \"Allow\",\n\t\t\t\"Action\": 
[\n\t\t\t\t\"ec2:AttachVolume\",\n\t\t\t\t\"ec2:CreateSnapshot\",\n\t\t\t\t\"ec2:CreateTags\",\n\t\t\t\t\"ec2:CreateVolume\",\n\t\t\t\t\"ec2:DeleteSnapshot\",\n\t\t\t\t\"ec2:DeleteTags\",\n\t\t\t\t\"ec2:DeleteVolume\",\n\t\t\t\t\"ec2:DescribeInstances\",\n\t\t\t\t\"ec2:DescribeSnapshots\",\n\t\t\t\t\"ec2:DescribeTags\",\n\t\t\t\t\"ec2:DescribeVolumes\",\n\t\t\t\t\"ec2:DescribeVolumesModifications\",\n\t\t\t\t\"ec2:DetachVolume\",\n\t\t\t\t\"ec2:ModifyVolume\"\n\t\t\t],\n\t\t\t\"Resource\": + \"*\"\n\t\t}\n\t]\n}" + type: string + required: + - controlPlaneOperatorARN + - imageRegistryARN + - ingressARN + - kmsProviderARN + - kubeCloudControllerARN + - networkARN + - nodePoolManagementARN + - storageARN + type: object + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/config/crd/kustomization.yaml b/config/crd/kustomization.yaml index c3f6177556..097bdc2873 100644 --- a/config/crd/kustomization.yaml +++ b/config/crd/kustomization.yaml @@ -26,6 +26,8 @@ resources: - bases/controlplane.cluster.x-k8s.io_rosacontrolplanes.yaml - bases/infrastructure.cluster.x-k8s.io_rosaclusters.yaml - bases/infrastructure.cluster.x-k8s.io_rosamachinepools.yaml +- bases/infrastructure.cluster.x-k8s.io_rosaroleconfigs.yaml +- bases/infrastructure.cluster.x-k8s.io_rosanetworks.yaml # +kubebuilder:scaffold:crdkustomizeresource patchesStrategicMerge: @@ -57,6 +59,8 @@ patchesStrategicMerge: - patches/cainjection_in_awsmanagedclustertemplates.yaml - patches/cainjection_in_eksconfigs.yaml - patches/cainjection_in_eksconfigtemplates.yaml +- patches/cainjection_in_rosaroleconfigs.yaml +- patches/cainjection_in_rosanetworks.yaml # +kubebuilder:scaffold:crdkustomizecainjectionpatch # [LABEL] To enable label, uncomment all the sections with [LABEL] prefix. diff --git a/config/crd/patches/cainjection_in_rosanetworks.yaml b/config/crd/patches/cainjection_in_rosanetworks.yaml new file mode 100644 index 0000000000..91ac2be238 --- /dev/null +++ b/config/crd/patches/cainjection_in_rosanetworks.yaml @@ -0,0 +1,8 @@ +# The following patch adds a directive for certmanager to inject CA into the CRD +# CRD conversion requires k8s 1.13 or later. +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) + name: rosanetworks.infrastructure.cluster.x-k8s.io diff --git a/config/crd/patches/cainjection_in_rosaroleconfigs.yaml b/config/crd/patches/cainjection_in_rosaroleconfigs.yaml new file mode 100644 index 0000000000..8a3a3e05ee --- /dev/null +++ b/config/crd/patches/cainjection_in_rosaroleconfigs.yaml @@ -0,0 +1,8 @@ +# The following patch adds a directive for certmanager to inject CA into the CRD +# CRD conversion requires k8s 1.13 or later. 
+apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) + name: rosaroleconfigs.infrastructure.cluster.x-k8s.io diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index a2ed671ffb..b3a99a0b2d 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -36,6 +36,14 @@ rules: - get - list - watch +- apiGroups: + - apiextensions.k8s.io + resources: + - customresourcedefinitions + verbs: + - get + - list + - watch - apiGroups: - authentication.k8s.io resources: @@ -73,6 +81,7 @@ rules: - clusters/status - machinedeployments - machines/status + - machinesets verbs: - get - list @@ -175,11 +184,21 @@ rules: resources: - awsclusters/status - awsfargateprofiles/status + - awsmachinetemplates/status - rosaclusters/status + - rosanetworks/status + - rosaroleconfigs/status verbs: - get - patch - update +- apiGroups: + - infrastructure.cluster.x-k8s.io + resources: + - awsmachinepools/finalizers + verbs: + - delete + - update - apiGroups: - infrastructure.cluster.x-k8s.io resources: @@ -198,6 +217,8 @@ rules: resources: - awsmachines - rosamachinepools + - rosanetworks + - rosaroleconfigs verbs: - create - delete @@ -210,6 +231,8 @@ rules: - infrastructure.cluster.x-k8s.io resources: - rosamachinepools/finalizers + - rosanetworks/finalizers + - rosaroleconfigs/finalizers verbs: - update - apiGroups: diff --git a/config/webhook/kustomizeconfig.yaml b/config/webhook/kustomizeconfig.yaml index 25e21e3c96..ca3af67d76 100644 --- a/config/webhook/kustomizeconfig.yaml +++ b/config/webhook/kustomizeconfig.yaml @@ -1,25 +1,4 @@ -# the following config is for teaching kustomize where to look at when substituting vars. -# It requires kustomize v2.1.0 or newer to work properly. 
-nameReference: -- kind: Service - version: v1 - fieldSpecs: - - kind: MutatingWebhookConfiguration - group: admissionregistration.k8s.io - path: webhooks/clientConfig/service/name - - kind: ValidatingWebhookConfiguration - group: admissionregistration.k8s.io - path: webhooks/clientConfig/service/name - -namespace: -- kind: MutatingWebhookConfiguration - group: admissionregistration.k8s.io - path: webhooks/clientConfig/service/namespace - create: true -- kind: ValidatingWebhookConfiguration - group: admissionregistration.k8s.io - path: webhooks/clientConfig/service/namespace - create: true - +# This directive should be removed when vars are removed from this +# kustomization varReference: - path: metadata/annotations diff --git a/config/webhook/manifests.yaml b/config/webhook/manifests.yaml index 91dd9aa54b..38ec935e11 100644 --- a/config/webhook/manifests.yaml +++ b/config/webhook/manifests.yaml @@ -223,6 +223,50 @@ webhooks: resources: - rosamachinepools sideEffects: None +- admissionReviewVersions: + - v1 + - v1beta1 + clientConfig: + service: + name: webhook-service + namespace: system + path: /mutate-infrastructure-cluster-x-k8s-io-v1beta2-rosanetwork + failurePolicy: Fail + matchPolicy: Equivalent + name: default.rosanetwork.infrastructure.cluster.x-k8s.io + rules: + - apiGroups: + - infrastructure.cluster.x-k8s.io + apiVersions: + - v1beta2 + operations: + - CREATE + - UPDATE + resources: + - rosanetworks + sideEffects: None +- admissionReviewVersions: + - v1 + - v1beta1 + clientConfig: + service: + name: webhook-service + namespace: system + path: /mutate-infrastructure-cluster-x-k8s-io-v1beta2-rosaroleconfig + failurePolicy: Fail + matchPolicy: Equivalent + name: default.rosaroleconfig.infrastructure.cluster.x-k8s.io + rules: + - apiGroups: + - infrastructure.cluster.x-k8s.io + apiVersions: + - v1beta2 + operations: + - CREATE + - UPDATE + resources: + - rosaroleconfigs + sideEffects: None - admissionReviewVersions: - v1 - v1beta1 @@ -581,6 +625,50 @@ webhooks: resources: - rosamachinepools sideEffects: None +- admissionReviewVersions: + - v1 + - v1beta1 + clientConfig: + service: + name: webhook-service + namespace: system + path: /validate-infrastructure-cluster-x-k8s-io-v1beta2-rosanetwork + failurePolicy: Fail + matchPolicy: Equivalent + name: validation.rosanetwork.infrastructure.cluster.x-k8s.io + rules: + - apiGroups: + - infrastructure.cluster.x-k8s.io + apiVersions: + - v1beta2 + operations: + - CREATE + - UPDATE + resources: + - rosanetworks + sideEffects: None +- admissionReviewVersions: + - v1 + - v1beta1 + clientConfig: + service: + name: webhook-service + namespace: system + path: /validate-infrastructure-cluster-x-k8s-io-v1beta2-rosaroleconfig + failurePolicy: Fail + matchPolicy: Equivalent + name: validation.rosaroleconfig.infrastructure.cluster.x-k8s.io + rules: + - apiGroups: + - infrastructure.cluster.x-k8s.io + apiVersions: + - v1beta2 + operations: + - CREATE + - UPDATE + resources: + - rosaroleconfigs + sideEffects: None - admissionReviewVersions: - v1 - v1beta1 diff --git a/controllers/awscluster_controller.go b/controllers/awscluster_controller.go index d0ffbbc462..4462fefd41 100644 --- a/controllers/awscluster_controller.go +++ b/controllers/awscluster_controller.go @@ -27,6 +27,7 @@ import ( kerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/client-go/tools/record" "k8s.io/klog/v2" + "k8s.io/utils/ptr" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller" @@ -49,10 +50,11 @@ 
import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger" infrautilconditions "sigs.k8s.io/cluster-api-provider-aws/v2/util/conditions" "sigs.k8s.io/cluster-api-provider-aws/v2/util/paused" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/cluster-api/util" capiannotations "sigs.k8s.io/cluster-api/util/annotations" - "sigs.k8s.io/cluster-api/util/conditions" + v1beta1conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" "sigs.k8s.io/cluster-api/util/predicates" ) @@ -291,19 +293,19 @@ func (r *AWSClusterReconciler) reconcileLoadBalancer(ctx context.Context, cluste if err := elbService.ReconcileLoadbalancers(ctx); err != nil { clusterScope.Error(err, "failed to reconcile load balancer") - conditions.MarkFalse(awsCluster, infrav1.LoadBalancerReadyCondition, infrav1.LoadBalancerFailedReason, infrautilconditions.ErrorConditionAfterInit(clusterScope.ClusterObj()), "%s", err.Error()) + v1beta1conditions.MarkFalse(awsCluster, infrav1.LoadBalancerReadyCondition, infrav1.LoadBalancerFailedReason, infrautilconditions.ErrorConditionAfterInit(clusterScope.ClusterObj()), "%s", err.Error()) return nil, err } if awsCluster.Status.Network.APIServerELB.DNSName == "" { - conditions.MarkFalse(awsCluster, infrav1.LoadBalancerReadyCondition, infrav1.WaitForDNSNameReason, clusterv1.ConditionSeverityInfo, "") + v1beta1conditions.MarkFalse(awsCluster, infrav1.LoadBalancerReadyCondition, infrav1.WaitForDNSNameReason, clusterv1beta1.ConditionSeverityInfo, "") clusterScope.Info("Waiting on API server ELB DNS name") return &retryAfterDuration, nil } - conditions.MarkTrue(awsCluster, infrav1.LoadBalancerReadyCondition) + v1beta1conditions.MarkTrue(awsCluster, infrav1.LoadBalancerReadyCondition) - awsCluster.Spec.ControlPlaneEndpoint = clusterv1.APIEndpoint{ + awsCluster.Spec.ControlPlaneEndpoint = clusterv1beta1.APIEndpoint{ Host: awsCluster.Status.Network.APIServerELB.DNSName, Port: clusterScope.APIServerPort(), } @@ -336,12 +338,12 @@ func (r *AWSClusterReconciler) reconcileNormal(ctx context.Context, clusterScope if err := sgService.ReconcileSecurityGroups(); err != nil { clusterScope.Error(err, "failed to reconcile security groups") - conditions.MarkFalse(awsCluster, infrav1.ClusterSecurityGroupsReadyCondition, infrav1.ClusterSecurityGroupReconciliationFailedReason, infrautilconditions.ErrorConditionAfterInit(clusterScope.ClusterObj()), "%s", err.Error()) + v1beta1conditions.MarkFalse(awsCluster, infrav1.ClusterSecurityGroupsReadyCondition, infrav1.ClusterSecurityGroupReconciliationFailedReason, infrautilconditions.ErrorConditionAfterInit(clusterScope.ClusterObj()), "%s", err.Error()) return reconcile.Result{}, err } if err := ec2Service.ReconcileBastion(); err != nil { - conditions.MarkFalse(awsCluster, infrav1.BastionHostReadyCondition, infrav1.BastionHostFailedReason, infrautilconditions.ErrorConditionAfterInit(clusterScope.ClusterObj()), "%s", err.Error()) + v1beta1conditions.MarkFalse(awsCluster, infrav1.BastionHostReadyCondition, infrav1.BastionHostFailedReason, infrautilconditions.ErrorConditionAfterInit(clusterScope.ClusterObj()), "%s", err.Error()) clusterScope.Error(err, "failed to reconcile bastion host") return reconcile.Result{}, err } @@ -361,10 +363,10 @@ func (r *AWSClusterReconciler) reconcileNormal(ctx context.Context, clusterScope } if err := s3Service.ReconcileBucket(ctx); err != nil { - conditions.MarkFalse(awsCluster, 
infrav1.S3BucketReadyCondition, infrav1.S3BucketFailedReason, clusterv1.ConditionSeverityError, "%s", err.Error()) + v1beta1conditions.MarkFalse(awsCluster, infrav1.S3BucketReadyCondition, infrav1.S3BucketFailedReason, clusterv1beta1.ConditionSeverityError, "%s", err.Error()) return reconcile.Result{}, errors.Wrapf(err, "failed to reconcile S3 Bucket for AWSCluster %s/%s", awsCluster.Namespace, awsCluster.Name) } - conditions.MarkTrue(awsCluster, infrav1.S3BucketReadyCondition) + v1beta1conditions.MarkTrue(awsCluster, infrav1.S3BucketReadyCondition) for _, subnet := range clusterScope.Subnets().FilterPrivate() { found := false @@ -375,8 +377,8 @@ func (r *AWSClusterReconciler) reconcileNormal(ctx context.Context, clusterScope } } - clusterScope.SetFailureDomain(subnet.AvailabilityZone, clusterv1.FailureDomainSpec{ - ControlPlane: found, + clusterScope.SetFailureDomain(subnet.AvailabilityZone, clusterv1.FailureDomain{ + ControlPlane: ptr.To(found), }) } @@ -419,18 +421,18 @@ func (r *AWSClusterReconciler) requeueAWSClusterForUnpausedCluster(_ context.Con } // Make sure the ref is set - if c.Spec.InfrastructureRef == nil { + if !c.Spec.InfrastructureRef.IsDefined() { log.Trace("Cluster does not have an InfrastructureRef, skipping mapping.") return nil } - if c.Spec.InfrastructureRef.GroupVersionKind().Kind != "AWSCluster" { + if c.Spec.InfrastructureRef.Kind != "AWSCluster" { log.Trace("Cluster has an InfrastructureRef for a different type, skipping mapping.") return nil } awsCluster := &infrav1.AWSCluster{} - key := types.NamespacedName{Namespace: c.Spec.InfrastructureRef.Namespace, Name: c.Spec.InfrastructureRef.Name} + key := types.NamespacedName{Namespace: c.Namespace, Name: c.Spec.InfrastructureRef.Name} if err := r.Get(ctx, key, awsCluster); err != nil { log.Error(err, "Failed to get AWS cluster") @@ -457,21 +459,21 @@ func (r *AWSClusterReconciler) checkForExternalControlPlaneLoadBalancer(clusterS switch { case len(awsCluster.Spec.ControlPlaneEndpoint.Host) == 0 && awsCluster.Spec.ControlPlaneEndpoint.Port == 0: clusterScope.Info("AWSCluster control plane endpoint is still non-populated") - conditions.MarkFalse(awsCluster, infrav1.LoadBalancerReadyCondition, infrav1.WaitForExternalControlPlaneEndpointReason, clusterv1.ConditionSeverityInfo, "") + v1beta1conditions.MarkFalse(awsCluster, infrav1.LoadBalancerReadyCondition, infrav1.WaitForExternalControlPlaneEndpointReason, clusterv1beta1.ConditionSeverityInfo, "") return &requeueAfterPeriod case len(awsCluster.Spec.ControlPlaneEndpoint.Host) == 0: clusterScope.Info("AWSCluster control plane endpoint host is still non-populated") - conditions.MarkFalse(awsCluster, infrav1.LoadBalancerReadyCondition, infrav1.WaitForExternalControlPlaneEndpointReason, clusterv1.ConditionSeverityInfo, "") + v1beta1conditions.MarkFalse(awsCluster, infrav1.LoadBalancerReadyCondition, infrav1.WaitForExternalControlPlaneEndpointReason, clusterv1beta1.ConditionSeverityInfo, "") return &requeueAfterPeriod case awsCluster.Spec.ControlPlaneEndpoint.Port == 0: clusterScope.Info("AWSCluster control plane endpoint port is still non-populated") - conditions.MarkFalse(awsCluster, infrav1.LoadBalancerReadyCondition, infrav1.WaitForExternalControlPlaneEndpointReason, clusterv1.ConditionSeverityInfo, "") + v1beta1conditions.MarkFalse(awsCluster, infrav1.LoadBalancerReadyCondition, infrav1.WaitForExternalControlPlaneEndpointReason, clusterv1beta1.ConditionSeverityInfo, "") return &requeueAfterPeriod default: - conditions.MarkTrue(awsCluster, 
infrav1.LoadBalancerReadyCondition) + v1beta1conditions.MarkTrue(awsCluster, infrav1.LoadBalancerReadyCondition) return nil } @@ -485,7 +487,7 @@ func (r *AWSClusterReconciler) dependencyCount(ctx context.Context, clusterScope listOptions := []client.ListOption{ client.InNamespace(namespace), - client.MatchingLabels(map[string]string{clusterv1.ClusterNameLabel: clusterName}), + client.MatchingLabels(map[string]string{clusterv1beta1.ClusterNameLabel: clusterName}), } machines := &infrav1.AWSMachineList{} diff --git a/controllers/awscluster_controller_test.go b/controllers/awscluster_controller_test.go index 64dbd30c44..491a90cde5 100644 --- a/controllers/awscluster_controller_test.go +++ b/controllers/awscluster_controller_test.go @@ -43,7 +43,7 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/network" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/securitygroup" "sigs.k8s.io/cluster-api-provider-aws/v2/test/mocks" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" "sigs.k8s.io/cluster-api/util" ) @@ -156,7 +156,7 @@ func TestAWSClusterReconcilerIntegrationTests(t *testing.T) { g.Expect(cluster.Spec.ControlPlaneEndpoint.Host).To(BeEmpty()) g.Expect(cluster.Spec.ControlPlaneEndpoint.Port).To(BeZero()) expectAWSClusterConditions(g, cs.AWSCluster, []conditionAssertion{ - {conditionType: infrav1.LoadBalancerReadyCondition, status: corev1.ConditionFalse, severity: clusterv1.ConditionSeverityInfo, reason: infrav1.WaitForExternalControlPlaneEndpointReason}, + {conditionType: infrav1.LoadBalancerReadyCondition, status: corev1.ConditionFalse, severity: clusterv1beta1.ConditionSeverityInfo, reason: infrav1.WaitForExternalControlPlaneEndpointReason}, }) // Mimicking an external operator patching the cluster with an already provisioned Load Balancer: // this could be done by a human who provisioned a LB, or by a Control Plane provider. 
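The controller hunks above reduce to two mechanical patterns: the deprecated v1beta1 condition helpers keep working under an explicit alias, and the v1beta2 FailureDomain takes a pointer-valued control-plane flag. A minimal sketch of both follows, assuming only the import paths shown in the hunks; the helper names markWaitingForDNS and failureDomainFor are illustrative, not part of the diff.

package controllers

import (
	"k8s.io/utils/ptr"

	infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
	clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2"
	v1beta1conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions"
)

// markWaitingForDNS mirrors the reconcileLoadBalancer change above: the old
// conditions.MarkFalse call becomes v1beta1conditions.MarkFalse, and the
// severity constant now comes from the clusterv1beta1 alias.
func markWaitingForDNS(awsCluster *infrav1.AWSCluster) {
	v1beta1conditions.MarkFalse(
		awsCluster,                         // object whose conditions are updated
		infrav1.LoadBalancerReadyCondition, // condition type
		infrav1.WaitForDNSNameReason,       // reason
		clusterv1beta1.ConditionSeverityInfo,
		"", // empty message, as in the hunk
	)
}

// failureDomainFor mirrors the reconcileNormal change above: v1beta2 replaces
// FailureDomainSpec{ControlPlane: bool} with FailureDomain{ControlPlane: *bool},
// hence the new ptr.To call.
func failureDomainFor(found bool) clusterv1.FailureDomain {
	return clusterv1.FailureDomain{ControlPlane: ptr.To(found)}
}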
@@ -651,15 +651,15 @@ func TestAWSClusterReconcilerIntegrationTests(t *testing.T) { _, err = reconciler.reconcileDelete(ctx, cs) g.Expect(err).To(BeNil()) expectAWSClusterConditions(g, cs.AWSCluster, []conditionAssertion{ - {infrav1.LoadBalancerReadyCondition, corev1.ConditionFalse, clusterv1.ConditionSeverityInfo, clusterv1.DeletedReason}, - {infrav1.BastionHostReadyCondition, corev1.ConditionFalse, clusterv1.ConditionSeverityInfo, clusterv1.DeletedReason}, - {infrav1.SecondaryCidrsReadyCondition, corev1.ConditionFalse, clusterv1.ConditionSeverityInfo, clusterv1.DeletingReason}, - {infrav1.RouteTablesReadyCondition, corev1.ConditionFalse, clusterv1.ConditionSeverityInfo, clusterv1.DeletedReason}, - {infrav1.VpcEndpointsReadyCondition, corev1.ConditionFalse, clusterv1.ConditionSeverityInfo, clusterv1.DeletedReason}, - {infrav1.NatGatewaysReadyCondition, corev1.ConditionFalse, clusterv1.ConditionSeverityInfo, clusterv1.DeletedReason}, - {infrav1.InternetGatewayReadyCondition, corev1.ConditionFalse, clusterv1.ConditionSeverityInfo, clusterv1.DeletedReason}, - {infrav1.SubnetsReadyCondition, corev1.ConditionFalse, clusterv1.ConditionSeverityInfo, clusterv1.DeletedReason}, - {infrav1.VpcReadyCondition, corev1.ConditionFalse, clusterv1.ConditionSeverityInfo, clusterv1.DeletedReason}, + {infrav1.LoadBalancerReadyCondition, corev1.ConditionFalse, clusterv1beta1.ConditionSeverityInfo, clusterv1beta1.DeletedReason}, + {infrav1.BastionHostReadyCondition, corev1.ConditionFalse, clusterv1beta1.ConditionSeverityInfo, clusterv1beta1.DeletedReason}, + {infrav1.SecondaryCidrsReadyCondition, corev1.ConditionFalse, clusterv1beta1.ConditionSeverityInfo, clusterv1beta1.DeletingReason}, + {infrav1.RouteTablesReadyCondition, corev1.ConditionFalse, clusterv1beta1.ConditionSeverityInfo, clusterv1beta1.DeletedReason}, + {infrav1.VpcEndpointsReadyCondition, corev1.ConditionFalse, clusterv1beta1.ConditionSeverityInfo, clusterv1beta1.DeletedReason}, + {infrav1.NatGatewaysReadyCondition, corev1.ConditionFalse, clusterv1beta1.ConditionSeverityInfo, clusterv1beta1.DeletedReason}, + {infrav1.InternetGatewayReadyCondition, corev1.ConditionFalse, clusterv1beta1.ConditionSeverityInfo, clusterv1beta1.DeletedReason}, + {infrav1.SubnetsReadyCondition, corev1.ConditionFalse, clusterv1beta1.ConditionSeverityInfo, clusterv1beta1.DeletedReason}, + {infrav1.VpcReadyCondition, corev1.ConditionFalse, clusterv1beta1.ConditionSeverityInfo, clusterv1beta1.DeletedReason}, }) }) } diff --git a/controllers/awscluster_controller_unit_test.go b/controllers/awscluster_controller_unit_test.go index ee2d0bb9cf..80280dff69 100644 --- a/controllers/awscluster_controller_unit_test.go +++ b/controllers/awscluster_controller_unit_test.go @@ -38,7 +38,8 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/mock_services" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/cluster-api/util" ) @@ -67,10 +68,19 @@ func TestAWSClusterReconcilerReconcile(t *testing.T) { expectError: false, }, { - name: "Should not Reconcile if cluster is paused", - awsCluster: &infrav1.AWSCluster{ObjectMeta: metav1.ObjectMeta{GenerateName: "aws-test-", Annotations: map[string]string{clusterv1.PausedAnnotation: ""}}}, - ownerCluster: &clusterv1.Cluster{ObjectMeta: metav1.ObjectMeta{GenerateName: "capi-test-"}}, - 
expectError: false, + name: "Should not Reconcile if cluster is paused", + awsCluster: &infrav1.AWSCluster{ObjectMeta: metav1.ObjectMeta{GenerateName: "aws-test-", Annotations: map[string]string{clusterv1.PausedAnnotation: ""}}}, + ownerCluster: &clusterv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{GenerateName: "capi-test-"}, + Spec: clusterv1.ClusterSpec{ + InfrastructureRef: clusterv1.ContractVersionedObjectReference{ + Kind: "AWSCluster", + Name: "aws-cluster", + APIGroup: infrav1.GroupVersion.Group, + }, + }, + }, + expectError: false, }, { name: "Should Reconcile successfully if no AWSCluster found", @@ -320,7 +330,7 @@ func TestAWSClusterReconcileOperations(t *testing.T) { g.Expect(err).To(BeNil()) _, err = reconciler.reconcileNormal(context.TODO(), cs) g.Expect(err).ToNot(BeNil()) - expectAWSClusterConditions(g, cs.AWSCluster, []conditionAssertion{{infrav1.ClusterSecurityGroupsReadyCondition, corev1.ConditionFalse, clusterv1.ConditionSeverityWarning, infrav1.ClusterSecurityGroupReconciliationFailedReason}}) + expectAWSClusterConditions(g, cs.AWSCluster, []conditionAssertion{{infrav1.ClusterSecurityGroupsReadyCondition, corev1.ConditionFalse, clusterv1beta1.ConditionSeverityWarning, infrav1.ClusterSecurityGroupReconciliationFailedReason}}) }) t.Run("Should fail AWSCluster create with BastionHostReadyCondition status false", func(t *testing.T) { g := NewWithT(t) @@ -343,7 +353,7 @@ func TestAWSClusterReconcileOperations(t *testing.T) { g.Expect(err).To(BeNil()) _, err = reconciler.reconcileNormal(context.TODO(), cs) g.Expect(err).ToNot(BeNil()) - expectAWSClusterConditions(g, cs.AWSCluster, []conditionAssertion{{infrav1.BastionHostReadyCondition, corev1.ConditionFalse, clusterv1.ConditionSeverityWarning, infrav1.BastionHostFailedReason}}) + expectAWSClusterConditions(g, cs.AWSCluster, []conditionAssertion{{infrav1.BastionHostReadyCondition, corev1.ConditionFalse, clusterv1beta1.ConditionSeverityWarning, infrav1.BastionHostFailedReason}}) }) t.Run("Should fail AWSCluster create with failure in LoadBalancer reconciliation", func(t *testing.T) { g := NewWithT(t) @@ -367,7 +377,7 @@ func TestAWSClusterReconcileOperations(t *testing.T) { g.Expect(err).To(BeNil()) _, err = reconciler.reconcileNormal(context.TODO(), cs) g.Expect(err).ToNot(BeNil()) - expectAWSClusterConditions(g, cs.AWSCluster, []conditionAssertion{{infrav1.LoadBalancerReadyCondition, corev1.ConditionFalse, clusterv1.ConditionSeverityWarning, infrav1.LoadBalancerFailedReason}}) + expectAWSClusterConditions(g, cs.AWSCluster, []conditionAssertion{{infrav1.LoadBalancerReadyCondition, corev1.ConditionFalse, clusterv1beta1.ConditionSeverityWarning, infrav1.LoadBalancerFailedReason}}) }) t.Run("Should fail AWSCluster create with LoadBalancer reconcile failure with WaitForDNSName condition as false", func(t *testing.T) { g := NewWithT(t) @@ -391,7 +401,7 @@ func TestAWSClusterReconcileOperations(t *testing.T) { g.Expect(err).To(BeNil()) _, err = reconciler.reconcileNormal(context.TODO(), cs) g.Expect(err).To(BeNil()) - expectAWSClusterConditions(g, cs.AWSCluster, []conditionAssertion{{infrav1.LoadBalancerReadyCondition, corev1.ConditionFalse, clusterv1.ConditionSeverityInfo, infrav1.WaitForDNSNameReason}}) + expectAWSClusterConditions(g, cs.AWSCluster, []conditionAssertion{{infrav1.LoadBalancerReadyCondition, corev1.ConditionFalse, clusterv1beta1.ConditionSeverityInfo, infrav1.WaitForDNSNameReason}}) }) }) }) diff --git a/controllers/awsmachine_annotations.go b/controllers/awsmachine_annotations.go index 1ae37d16cb..6f26f9b0d9 
100644 --- a/controllers/awsmachine_annotations.go +++ b/controllers/awsmachine_annotations.go @@ -18,6 +18,7 @@ package controllers import ( "encoding/json" + "maps" infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" ) @@ -40,13 +41,14 @@ func (r *AWSMachineReconciler) updateMachineAnnotationJSON(machine *infrav1.AWSM // `content`. func (r *AWSMachineReconciler) updateMachineAnnotation(machine *infrav1.AWSMachine, annotation, content string) { // Get the annotations - annotations := machine.GetAnnotations() + annotations := maps.Clone(machine.GetAnnotations()) - // Set our annotation to the given content. - if annotations != nil { - annotations[annotation] = content + if annotations == nil { + annotations = map[string]string{} } + annotations[annotation] = content + // Update the machine object with these annotations machine.SetAnnotations(annotations) } diff --git a/controllers/awsmachine_controller.go b/controllers/awsmachine_controller.go index 445bab678c..ab76af1cf9 100644 --- a/controllers/awsmachine_controller.go +++ b/controllers/awsmachine_controller.go @@ -60,9 +60,10 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/userdata" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger" "sigs.k8s.io/cluster-api-provider-aws/v2/util/paused" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/cluster-api/util" - "sigs.k8s.io/cluster-api/util/conditions" + v1beta1conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" "sigs.k8s.io/cluster-api/util/predicates" ) @@ -145,11 +146,13 @@ func (r *AWSMachineReconciler) getObjectStoreService(scope scope.S3Scope) servic // +kubebuilder:rbac:groups=controlplane.cluster.x-k8s.io,resources=*,verbs=get;list;watch // +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=awsmachines,verbs=create;get;list;watch;update;patch;delete // +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=awsmachines/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=awsmachinepools/finalizers,verbs=update // +kubebuilder:rbac:groups=cluster.x-k8s.io,resources=machines,verbs=get;list;watch;delete // +kubebuilder:rbac:groups=cluster.x-k8s.io,resources=machines/status,verbs=get;list;watch // +kubebuilder:rbac:groups="",resources=secrets;,verbs=get;list;watch // +kubebuilder:rbac:groups="",resources=namespaces,verbs=get;list;watch // +kubebuilder:rbac:groups="",resources=events,verbs=get;list;watch;create;update;patch +// +kubebuilder:rbac:groups=apiextensions.k8s.io,resources=customresourcedefinitions,verbs=get;list;watch func (r *AWSMachineReconciler) Reconcile(ctx context.Context, req ctrl.Request) (_ ctrl.Result, reterr error) { log := logger.FromContext(ctx) @@ -294,7 +297,7 @@ func (r *AWSMachineReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Ma return controller.Watch( source.Kind[client.Object](mgr.GetCache(), &clusterv1.Cluster{}, handler.EnqueueRequestsFromMapFunc(requeueAWSMachinesForUnpausedCluster), - predicates.ClusterPausedTransitionsOrInfrastructureReady(mgr.GetScheme(), log.GetLogger())), + predicates.ClusterPausedTransitionsOrInfrastructureProvisioned(mgr.GetScheme(), log.GetLogger())), ) } @@ -303,9 +306,11 @@ func (r *AWSMachineReconciler) reconcileDelete(ctx context.Context, machineScope ec2Service := r.getEC2Service(ec2Scope) - if err := r.deleteBootstrapData(ctx, machineScope, clusterScope, 
objectStoreScope); err != nil { - machineScope.Error(err, "unable to delete machine") - return ctrl.Result{}, err + if !machineScope.IsMachinePoolMachine() { + if err := r.deleteBootstrapData(ctx, machineScope, clusterScope, objectStoreScope); err != nil { + machineScope.Error(err, "unable to delete AWSMachine bootstrap data") + return ctrl.Result{}, err + } } instance, err := r.findInstance(machineScope, ec2Service) @@ -335,13 +340,13 @@ func (r *AWSMachineReconciler) reconcileDelete(ctx context.Context, machineScope // all the other errors are blocking. // Because we are reconciling all load balancers, attempt to treat the error as a list of errors. if err = kerrors.FilterOut(err, elb.IsAccessDenied, elb.IsNotFound); err != nil { - conditions.MarkFalse(machineScope.AWSMachine, infrav1.ELBAttachedCondition, "DeletingFailed", clusterv1.ConditionSeverityWarning, "%s", err.Error()) + v1beta1conditions.MarkFalse(machineScope.AWSMachine, infrav1.ELBAttachedCondition, "DeletingFailed", clusterv1beta1.ConditionSeverityWarning, "%s", err.Error()) return ctrl.Result{}, errors.Errorf("failed to reconcile LB attachment: %+v", err) } } if machineScope.IsControlPlane() { - conditions.MarkFalse(machineScope.AWSMachine, infrav1.ELBAttachedCondition, clusterv1.DeletedReason, clusterv1.ConditionSeverityInfo, "") + v1beta1conditions.MarkFalse(machineScope.AWSMachine, infrav1.ELBAttachedCondition, clusterv1beta1.DeletedReason, clusterv1beta1.ConditionSeverityInfo, "") } if feature.Gates.Enabled(feature.EventBridgeInstanceState) { @@ -360,13 +365,43 @@ func (r *AWSMachineReconciler) reconcileDelete(ctx context.Context, machineScope return ctrl.Result{RequeueAfter: time.Minute}, nil case infrav1.InstanceStateTerminated: machineScope.Info("EC2 instance terminated successfully", "instance-id", instance.ID) + + // Handle dedicated host cleanup AFTER instance is confirmed terminated + if machineScope.AWSMachine.Status.DedicatedHost != nil && + machineScope.AWSMachine.Status.DedicatedHost.ID != nil && + machineScope.AWSMachine.Spec.DynamicHostAllocation != nil { + hostID := *machineScope.AWSMachine.Status.DedicatedHost.ID + + // Attempt to release the dedicated host + machineScope.Info("Releasing dynamically allocated dedicated host", "hostID", hostID) + if err := ec2Service.ReleaseDedicatedHost(ctx, hostID); err != nil { + machineScope.Error(err, "failed to release dedicated host", "hostID", hostID) + v1beta1conditions.MarkFalse(machineScope.AWSMachine, infrav1.DedicatedHostReleaseCondition, infrav1.DedicatedHostReleaseFailedReason, clusterv1beta1.ConditionSeverityError, "%s", err.Error()) + r.Recorder.Eventf(machineScope.AWSMachine, corev1.EventTypeWarning, "FailedReleaseHost", "Failed to release dedicated host %s: %v", hostID, err) + return ctrl.Result{}, err + } + + // Host release succeeded + machineScope.Info("Successfully released dedicated host", "hostID", hostID) + r.Recorder.Eventf(machineScope.AWSMachine, corev1.EventTypeNormal, "SuccessfulReleaseHost", "Released dedicated host %s", hostID) + + // Mark the condition as succeeded + v1beta1conditions.MarkTrue(machineScope.AWSMachine, infrav1.DedicatedHostReleaseCondition) + + // Patch the object to persist success state + if err := machineScope.PatchObject(); err != nil { + machineScope.Error(err, "failed to patch object after successful host release") + return ctrl.Result{}, err + } + } + controllerutil.RemoveFinalizer(machineScope.AWSMachine, infrav1.MachineFinalizer) return ctrl.Result{}, nil default: machineScope.Info("Terminating EC2 instance", 
"instance-id", instance.ID) // Set the InstanceReadyCondition and patch the object before the blocking operation - conditions.MarkFalse(machineScope.AWSMachine, infrav1.InstanceReadyCondition, clusterv1.DeletingReason, clusterv1.ConditionSeverityInfo, "") + v1beta1conditions.MarkFalse(machineScope.AWSMachine, infrav1.InstanceReadyCondition, clusterv1beta1.DeletingReason, clusterv1beta1.ConditionSeverityInfo, "") if err := machineScope.PatchObject(); err != nil { machineScope.Error(err, "failed to patch object") return ctrl.Result{}, err @@ -374,11 +409,11 @@ func (r *AWSMachineReconciler) reconcileDelete(ctx context.Context, machineScope if err := ec2Service.TerminateInstance(instance.ID); err != nil { machineScope.Error(err, "failed to terminate instance") - conditions.MarkFalse(machineScope.AWSMachine, infrav1.InstanceReadyCondition, "DeletingFailed", clusterv1.ConditionSeverityWarning, "%s", err.Error()) + v1beta1conditions.MarkFalse(machineScope.AWSMachine, infrav1.InstanceReadyCondition, "DeletingFailed", clusterv1beta1.ConditionSeverityWarning, "%s", err.Error()) r.Recorder.Eventf(machineScope.AWSMachine, corev1.EventTypeWarning, "FailedTerminate", "Failed to terminate instance %q: %v", instance.ID, err) return ctrl.Result{}, err } - conditions.MarkFalse(machineScope.AWSMachine, infrav1.InstanceReadyCondition, clusterv1.DeletedReason, clusterv1.ConditionSeverityInfo, "") + v1beta1conditions.MarkFalse(machineScope.AWSMachine, infrav1.InstanceReadyCondition, clusterv1beta1.DeletedReason, clusterv1beta1.ConditionSeverityInfo, "") // If the AWSMachine specifies NetworkStatus Interfaces, detach the cluster's core Security Groups from them as part of deletion. if len(machineScope.AWSMachine.Spec.NetworkInterfaces) > 0 { @@ -394,7 +429,7 @@ func (r *AWSMachineReconciler) reconcileDelete(ctx context.Context, machineScope "instanceID", instance.ID, ) - conditions.MarkFalse(machineScope.AWSMachine, infrav1.SecurityGroupsReadyCondition, clusterv1.DeletingReason, clusterv1.ConditionSeverityInfo, "") + v1beta1conditions.MarkFalse(machineScope.AWSMachine, infrav1.SecurityGroupsReadyCondition, clusterv1beta1.DeletingReason, clusterv1beta1.ConditionSeverityInfo, "") if err := machineScope.PatchObject(); err != nil { return ctrl.Result{}, err } @@ -402,11 +437,11 @@ func (r *AWSMachineReconciler) reconcileDelete(ctx context.Context, machineScope for _, id := range machineScope.AWSMachine.Spec.NetworkInterfaces { if err := ec2Service.DetachSecurityGroupsFromNetworkInterface(core, id); err != nil { machineScope.Error(err, "failed to detach security groups from instance's network interfaces") - conditions.MarkFalse(machineScope.AWSMachine, infrav1.SecurityGroupsReadyCondition, "DeletingFailed", clusterv1.ConditionSeverityWarning, "%s", err.Error()) + v1beta1conditions.MarkFalse(machineScope.AWSMachine, infrav1.SecurityGroupsReadyCondition, "DeletingFailed", clusterv1beta1.ConditionSeverityWarning, "%s", err.Error()) return ctrl.Result{}, err } } - conditions.MarkFalse(machineScope.AWSMachine, infrav1.SecurityGroupsReadyCondition, clusterv1.DeletedReason, clusterv1.ConditionSeverityInfo, "") + v1beta1conditions.MarkFalse(machineScope.AWSMachine, infrav1.SecurityGroupsReadyCondition, clusterv1beta1.DeletedReason, clusterv1beta1.ConditionSeverityInfo, "") } // Release an Elastic IP when the machine has public IP Address (EIP) with a cluster-wide config @@ -448,6 +483,7 @@ func (r *AWSMachineReconciler) findInstance(machineScope *scope.MachineScope, ec } else { // If the ProviderID is populated, describe 
the instance using the ID. // InstanceIfExists() returns error (ErrInstanceNotFoundByID or ErrDescribeInstance) if the instance could not be found. + //nolint:staticcheck instance, err = ec2svc.InstanceIfExists(ptr.To[string](pid.ID())) if err != nil { @@ -476,16 +512,16 @@ func (r *AWSMachineReconciler) reconcileNormal(ctx context.Context, machineScope return ctrl.Result{}, nil } - if !machineScope.Cluster.Status.InfrastructureReady { + if !ptr.Deref(machineScope.Cluster.Status.Initialization.InfrastructureProvisioned, false) { machineScope.Info("Cluster infrastructure is not ready yet") - conditions.MarkFalse(machineScope.AWSMachine, infrav1.InstanceReadyCondition, infrav1.WaitingForClusterInfrastructureReason, clusterv1.ConditionSeverityInfo, "") + v1beta1conditions.MarkFalse(machineScope.AWSMachine, infrav1.InstanceReadyCondition, infrav1.WaitingForClusterInfrastructureReason, clusterv1beta1.ConditionSeverityInfo, "") return ctrl.Result{}, nil } // Make sure bootstrap data is available and populated. if !machineScope.IsMachinePoolMachine() && machineScope.Machine.Spec.Bootstrap.DataSecretName == nil { machineScope.Info("Bootstrap data secret reference is not yet available") - conditions.MarkFalse(machineScope.AWSMachine, infrav1.InstanceReadyCondition, infrav1.WaitingForBootstrapDataReason, clusterv1.ConditionSeverityInfo, "") + v1beta1conditions.MarkFalse(machineScope.AWSMachine, infrav1.InstanceReadyCondition, infrav1.WaitingForBootstrapDataReason, clusterv1beta1.ConditionSeverityInfo, "") return ctrl.Result{}, nil } @@ -495,13 +531,13 @@ func (r *AWSMachineReconciler) reconcileNormal(ctx context.Context, machineScope instance, err := r.findInstance(machineScope, ec2svc) if err != nil { machineScope.Error(err, "unable to find instance") - conditions.MarkUnknown(machineScope.AWSMachine, infrav1.InstanceReadyCondition, infrav1.InstanceNotFoundReason, "%s", err.Error()) + v1beta1conditions.MarkUnknown(machineScope.AWSMachine, infrav1.InstanceReadyCondition, infrav1.InstanceNotFoundReason, "%s", err.Error()) return ctrl.Result{}, err } if instance == nil && machineScope.IsMachinePoolMachine() { err = errors.New("no instance found for machine pool") machineScope.Error(err, "unable to find instance") - conditions.MarkUnknown(machineScope.AWSMachine, infrav1.InstanceReadyCondition, infrav1.InstanceNotFoundReason, "%s", err.Error()) + v1beta1conditions.MarkUnknown(machineScope.AWSMachine, infrav1.InstanceReadyCondition, infrav1.InstanceNotFoundReason, "%s", err.Error()) return ctrl.Result{}, err } @@ -517,8 +553,8 @@ func (r *AWSMachineReconciler) reconcileNormal(ctx context.Context, machineScope // Create new instance since providerId is nil and instance could not be found by tags. 
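The readiness gate just above is another pattern repeated throughout the patch: the v1beta2 Cluster API replaces the plain bool Status.InfrastructureReady with the optional *bool Status.Initialization.InfrastructureProvisioned, so callers default the unset case with ptr.Deref. A minimal sketch, with an illustrative helper name:

package example

import (
	"k8s.io/utils/ptr"
	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2"
)

// infrastructureProvisioned treats an unset pointer the same as "not yet
// provisioned", matching the reconciler's wait-and-requeue behavior.
func infrastructureProvisioned(cluster *clusterv1.Cluster) bool {
	return ptr.Deref(cluster.Status.Initialization.InfrastructureProvisioned, false)
}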
if instance == nil { // Avoid a flickering condition between InstanceProvisionStarted and InstanceProvisionFailed if there's a persistent failure with createInstance - if conditions.GetReason(machineScope.AWSMachine, infrav1.InstanceReadyCondition) != infrav1.InstanceProvisionFailedReason { - conditions.MarkFalse(machineScope.AWSMachine, infrav1.InstanceReadyCondition, infrav1.InstanceProvisionStartedReason, clusterv1.ConditionSeverityInfo, "") + if v1beta1conditions.GetReason(machineScope.AWSMachine, infrav1.InstanceReadyCondition) != infrav1.InstanceProvisionFailedReason { + v1beta1conditions.MarkFalse(machineScope.AWSMachine, infrav1.InstanceReadyCondition, infrav1.InstanceProvisionStartedReason, clusterv1beta1.ConditionSeverityInfo, "") if patchErr := machineScope.PatchObject(); patchErr != nil { machineScope.Error(patchErr, "failed to patch conditions") return ctrl.Result{}, patchErr @@ -534,7 +570,7 @@ func (r *AWSMachineReconciler) reconcileNormal(ctx context.Context, machineScope instance, err = r.createInstance(ctx, ec2svc, machineScope, clusterScope, objectStoreSvc) if err != nil { machineScope.Error(err, "unable to create instance") - conditions.MarkFalse(machineScope.AWSMachine, infrav1.InstanceReadyCondition, infrav1.InstanceProvisionFailedReason, clusterv1.ConditionSeverityError, "%s", err.Error()) + v1beta1conditions.MarkFalse(machineScope.AWSMachine, infrav1.InstanceReadyCondition, infrav1.InstanceProvisionFailedReason, clusterv1beta1.ConditionSeverityError, "%s", err.Error()) return ctrl.Result{}, err } } @@ -584,13 +620,13 @@ func (r *AWSMachineReconciler) reconcileNormal(ctx context.Context, machineScope case infrav1.InstanceStatePending: machineScope.SetNotReady() shouldRequeue = true - conditions.MarkFalse(machineScope.AWSMachine, infrav1.InstanceReadyCondition, infrav1.InstanceNotReadyReason, clusterv1.ConditionSeverityWarning, "") + v1beta1conditions.MarkFalse(machineScope.AWSMachine, infrav1.InstanceReadyCondition, infrav1.InstanceNotReadyReason, clusterv1beta1.ConditionSeverityWarning, "") case infrav1.InstanceStateStopping, infrav1.InstanceStateStopped: machineScope.SetNotReady() - conditions.MarkFalse(machineScope.AWSMachine, infrav1.InstanceReadyCondition, infrav1.InstanceStoppedReason, clusterv1.ConditionSeverityError, "") + v1beta1conditions.MarkFalse(machineScope.AWSMachine, infrav1.InstanceReadyCondition, infrav1.InstanceStoppedReason, clusterv1beta1.ConditionSeverityError, "") case infrav1.InstanceStateRunning: machineScope.SetReady() - conditions.MarkTrue(machineScope.AWSMachine, infrav1.InstanceReadyCondition) + v1beta1conditions.MarkTrue(machineScope.AWSMachine, infrav1.InstanceReadyCondition) case infrav1.InstanceStateShuttingDown, infrav1.InstanceStateTerminated: machineScope.SetNotReady() @@ -599,11 +635,11 @@ func (r *AWSMachineReconciler) reconcileNormal(ctx context.Context, machineScope // and therefore should not be reported as error. 
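The flicker guard above is worth calling out: on a persistent createInstance failure the InstanceReady condition would otherwise oscillate between InstanceProvisionStarted and InstanceProvisionFailed on every reconcile. A condensed sketch of the pattern under a hypothetical helper name (the real code is inline in reconcileNormal):

package example

import (
	infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
	"sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope"
	clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
	v1beta1conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions"
)

// markProvisionStarted resets the InstanceReady condition before a create
// attempt, unless the last recorded reason is the persistent failure reason.
func markProvisionStarted(machineScope *scope.MachineScope) error {
	if v1beta1conditions.GetReason(machineScope.AWSMachine, infrav1.InstanceReadyCondition) == infrav1.InstanceProvisionFailedReason {
		// A persistent createInstance failure: keep the failed reason instead
		// of flickering back to "started" on every reconcile.
		return nil
	}
	v1beta1conditions.MarkFalse(machineScope.AWSMachine, infrav1.InstanceReadyCondition,
		infrav1.InstanceProvisionStartedReason, clusterv1beta1.ConditionSeverityInfo, "")
	// Persist the intermediate condition before the blocking create call, so
	// the state is visible on the API server while the instance comes up.
	return machineScope.PatchObject()
}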
machineScope.Info("EC2 instance of machine pool was terminated", "state", instance.State, "instance-id", *machineScope.GetInstanceID()) r.Recorder.Eventf(machineScope.AWSMachine, corev1.EventTypeNormal, infrav1.InstanceTerminatedReason, "EC2 instance termination") - conditions.MarkFalse(machineScope.AWSMachine, infrav1.InstanceReadyCondition, infrav1.InstanceTerminatedReason, clusterv1.ConditionSeverityInfo, "") + v1beta1conditions.MarkFalse(machineScope.AWSMachine, infrav1.InstanceReadyCondition, infrav1.InstanceTerminatedReason, clusterv1beta1.ConditionSeverityInfo, "") } else { machineScope.Info("Unexpected EC2 instance termination", "state", instance.State, "instance-id", *machineScope.GetInstanceID()) r.Recorder.Eventf(machineScope.AWSMachine, corev1.EventTypeWarning, "InstanceUnexpectedTermination", "Unexpected EC2 instance termination") - conditions.MarkFalse(machineScope.AWSMachine, infrav1.InstanceReadyCondition, infrav1.InstanceTerminatedReason, clusterv1.ConditionSeverityError, "") + v1beta1conditions.MarkFalse(machineScope.AWSMachine, infrav1.InstanceReadyCondition, infrav1.InstanceTerminatedReason, clusterv1beta1.ConditionSeverityError, "") } default: machineScope.SetNotReady() @@ -611,7 +647,7 @@ func (r *AWSMachineReconciler) reconcileNormal(ctx context.Context, machineScope r.Recorder.Eventf(machineScope.AWSMachine, corev1.EventTypeWarning, "InstanceUnhandledState", "EC2 instance state is undefined") machineScope.SetFailureReason("UpdateError") machineScope.SetFailureMessage(errors.Errorf("EC2 instance state %q is undefined", instance.State)) - conditions.MarkUnknown(machineScope.AWSMachine, infrav1.InstanceReadyCondition, "", "") + v1beta1conditions.MarkUnknown(machineScope.AWSMachine, infrav1.InstanceReadyCondition, "", "") } // reconcile the deletion of the bootstrap data secret now that we have updated instance state @@ -681,11 +717,11 @@ func (r *AWSMachineReconciler) reconcileOperationalState(ec2svc services.EC2Inte // Ensure that the security groups are correct. 
_, err = r.ensureSecurityGroups(ec2svc, machineScope, machineScope.AWSMachine.Spec.AdditionalSecurityGroups, existingSecurityGroups) if err != nil { - conditions.MarkFalse(machineScope.AWSMachine, infrav1.SecurityGroupsReadyCondition, infrav1.SecurityGroupsFailedReason, clusterv1.ConditionSeverityError, "%s", err.Error()) + v1beta1conditions.MarkFalse(machineScope.AWSMachine, infrav1.SecurityGroupsReadyCondition, infrav1.SecurityGroupsFailedReason, clusterv1beta1.ConditionSeverityError, "%s", err.Error()) machineScope.Error(err, "unable to ensure security groups") return err } - conditions.MarkTrue(machineScope.AWSMachine, infrav1.SecurityGroupsReadyCondition) + v1beta1conditions.MarkTrue(machineScope.AWSMachine, infrav1.SecurityGroupsReadyCondition) err = r.ensureInstanceMetadataOptions(ec2svc, instance, machineScope.AWSMachine) if err != nil { @@ -712,7 +748,7 @@ func (r *AWSMachineReconciler) deleteEncryptedBootstrapDataSecret(machineScope * } // Do nothing if the AWSMachine is not in a failed state, and is operational from an EC2 perspective, but does not have a node reference - if !machineScope.HasFailed() && machineScope.InstanceIsOperational() && machineScope.Machine.Status.NodeRef == nil && !machineScope.AWSMachineIsDeleted() { + if !machineScope.HasFailed() && machineScope.InstanceIsOperational() && !machineScope.Machine.Status.NodeRef.IsDefined() && !machineScope.AWSMachineIsDeleted() { return nil } machineScope.Info("Deleting unneeded entry from AWS Secret", "secretPrefix", machineScope.GetSecretPrefix()) @@ -923,7 +959,7 @@ func (r *AWSMachineReconciler) deleteBootstrapData(ctx context.Context, machineS func (r *AWSMachineReconciler) deleteIgnitionBootstrapDataFromS3(ctx context.Context, machineScope *scope.MachineScope, objectStoreSvc services.ObjectStoreInterface) error { // Do nothing if the AWSMachine is not in a failed state, and is operational from an EC2 perspective, but does not have a node reference - if !machineScope.HasFailed() && machineScope.InstanceIsOperational() && machineScope.Machine.Status.NodeRef == nil && !machineScope.AWSMachineIsDeleted() { + if !machineScope.HasFailed() && machineScope.InstanceIsOperational() && !machineScope.Machine.Status.NodeRef.IsDefined() && !machineScope.AWSMachineIsDeleted() { return nil } @@ -1014,12 +1050,12 @@ func (r *AWSMachineReconciler) registerInstanceToClassicLB(ctx context.Context, if err := elbsvc.RegisterInstanceWithAPIServerELB(ctx, i); err != nil { r.Recorder.Eventf(machineScope.AWSMachine, corev1.EventTypeWarning, "FailedAttachControlPlaneELB", "Failed to register control plane instance %q with classic load balancer: %v", i.ID, err) - conditions.MarkFalse(machineScope.AWSMachine, infrav1.ELBAttachedCondition, infrav1.ELBAttachFailedReason, clusterv1.ConditionSeverityError, "%s", err.Error()) + v1beta1conditions.MarkFalse(machineScope.AWSMachine, infrav1.ELBAttachedCondition, infrav1.ELBAttachFailedReason, clusterv1beta1.ConditionSeverityError, "%s", err.Error()) return errors.Wrapf(err, "could not register control plane instance %q with classic load balancer", i.ID) } r.Recorder.Eventf(machineScope.AWSMachine, corev1.EventTypeNormal, "SuccessfulAttachControlPlaneELB", "Control plane instance %q is registered with classic load balancer", i.ID) - conditions.MarkTrue(machineScope.AWSMachine, infrav1.ELBAttachedCondition) + v1beta1conditions.MarkTrue(machineScope.AWSMachine, infrav1.ELBAttachedCondition) return nil } @@ -1039,19 +1075,19 @@ func (r *AWSMachineReconciler) registerInstanceToV2LB(ctx context.Context, machi 
if ptr.Deref(machineScope.GetInstanceState(), infrav1.InstanceStatePending) != infrav1.InstanceStateRunning { r.Recorder.Eventf(machineScope.AWSMachine, corev1.EventTypeWarning, "FailedAttachControlPlaneELB", "Cannot register control plane instance %q with load balancer: instance is not running", instance.ID) - conditions.MarkFalse(machineScope.AWSMachine, infrav1.ELBAttachedCondition, infrav1.ELBAttachFailedReason, clusterv1.ConditionSeverityInfo, "instance not running") + v1beta1conditions.MarkFalse(machineScope.AWSMachine, infrav1.ELBAttachedCondition, infrav1.ELBAttachFailedReason, clusterv1beta1.ConditionSeverityInfo, "instance not running") return elb.NewInstanceNotRunning("instance is not running") } if err := elbsvc.RegisterInstanceWithAPIServerLB(ctx, instance, lb); err != nil { r.Recorder.Eventf(machineScope.AWSMachine, corev1.EventTypeWarning, "FailedAttachControlPlaneELB", "Failed to register control plane instance %q with load balancer: %v", instance.ID, err) - conditions.MarkFalse(machineScope.AWSMachine, infrav1.ELBAttachedCondition, infrav1.ELBAttachFailedReason, clusterv1.ConditionSeverityError, "%s", err.Error()) + v1beta1conditions.MarkFalse(machineScope.AWSMachine, infrav1.ELBAttachedCondition, infrav1.ELBAttachFailedReason, clusterv1beta1.ConditionSeverityError, "%s", err.Error()) return errors.Wrapf(err, "could not register control plane instance %q with load balancer", instance.ID) } r.Recorder.Eventf(machineScope.AWSMachine, corev1.EventTypeNormal, "SuccessfulAttachControlPlaneELB", "Control plane instance %q is registered with load balancer", instance.ID) - conditions.MarkTrue(machineScope.AWSMachine, infrav1.ELBAttachedCondition) + v1beta1conditions.MarkTrue(machineScope.AWSMachine, infrav1.ELBAttachedCondition) return nil } @@ -1070,7 +1106,7 @@ func (r *AWSMachineReconciler) deregisterInstanceFromClassicLB(ctx context.Conte if err := elbsvc.DeregisterInstanceFromAPIServerELB(ctx, instance); err != nil { r.Recorder.Eventf(machineScope.AWSMachine, corev1.EventTypeWarning, "FailedDetachControlPlaneELB", "Failed to deregister control plane instance %q from load balancer: %v", instance.ID, err) - conditions.MarkFalse(machineScope.AWSMachine, infrav1.ELBAttachedCondition, infrav1.ELBDetachFailedReason, clusterv1.ConditionSeverityError, "%s", err.Error()) + v1beta1conditions.MarkFalse(machineScope.AWSMachine, infrav1.ELBAttachedCondition, infrav1.ELBDetachFailedReason, clusterv1beta1.ConditionSeverityError, "%s", err.Error()) return errors.Wrapf(err, "could not deregister control plane instance %q from load balancer", instance.ID) } @@ -1095,7 +1131,7 @@ func (r *AWSMachineReconciler) deregisterInstanceFromV2LB(ctx context.Context, m if err := elbsvc.DeregisterInstanceFromAPIServerLB(ctx, targetGroupArn, i); err != nil { r.Recorder.Eventf(machineScope.AWSMachine, corev1.EventTypeWarning, "FailedDetachControlPlaneELB", "Failed to deregister control plane instance %q from load balancer: %v", i.ID, err) - conditions.MarkFalse(machineScope.AWSMachine, infrav1.ELBAttachedCondition, infrav1.ELBDetachFailedReason, clusterv1.ConditionSeverityError, "%s", err.Error()) + v1beta1conditions.MarkFalse(machineScope.AWSMachine, infrav1.ELBAttachedCondition, infrav1.ELBDetachFailedReason, clusterv1beta1.ConditionSeverityError, "%s", err.Error()) return errors.Wrapf(err, "could not deregister control plane instance %q from load balancer", i.ID) } } @@ -1166,7 +1202,7 @@ func (r *AWSMachineReconciler) requestsForCluster(log logger.Wrapper, namespace, result := make([]ctrl.Request, 0, 
len(machineList.Items)) for _, m := range machineList.Items { log.WithValues("machine", klog.KObj(&m)) - if m.Spec.InfrastructureRef.GroupVersionKind().Kind != "AWSMachine" { + if m.Spec.InfrastructureRef.Kind != "AWSMachine" { log.Trace("Machine has an InfrastructureRef for a different type, will not add to reconciliation request.") continue } @@ -1174,7 +1210,7 @@ func (r *AWSMachineReconciler) requestsForCluster(log logger.Wrapper, namespace, log.Trace("Machine has an InfrastructureRef with an empty name, will not add to reconciliation request.") continue } - log.WithValues("awsMachine", klog.KRef(m.Spec.InfrastructureRef.Namespace, m.Spec.InfrastructureRef.Name)) + log.WithValues("awsMachine", klog.KRef(m.Namespace, m.Spec.InfrastructureRef.Name)) log.Trace("Adding AWSMachine to reconciliation request.") result = append(result, ctrl.Request{NamespacedName: client.ObjectKey{Namespace: m.Namespace, Name: m.Spec.InfrastructureRef.Name}}) } @@ -1186,7 +1222,7 @@ func (r *AWSMachineReconciler) getInfraCluster(ctx context.Context, log *logger. var managedControlPlaneScope *scope.ManagedControlPlaneScope var err error - if cluster.Spec.ControlPlaneRef != nil && cluster.Spec.ControlPlaneRef.Kind == "AWSManagedControlPlane" { + if cluster.Spec.ControlPlaneRef.IsDefined() && cluster.Spec.ControlPlaneRef.Kind == "AWSManagedControlPlane" { controlPlane := &ekscontrolplanev1.AWSManagedControlPlane{} controlPlaneName := client.ObjectKey{ Namespace: awsMachine.Namespace, diff --git a/controllers/awsmachine_controller_test.go b/controllers/awsmachine_controller_test.go index c2165e16ef..ad65f4a5cb 100644 --- a/controllers/awsmachine_controller_test.go +++ b/controllers/awsmachine_controller_test.go @@ -36,6 +36,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" + expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services" @@ -43,9 +44,10 @@ import ( elbService "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/elb" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/mock_services" "sigs.k8s.io/cluster-api-provider-aws/v2/test/mocks" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/cluster-api/util" - "sigs.k8s.io/cluster-api/util/conditions" + v1beta1conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" ) func TestAWSMachineReconcilerIntegrationTests(t *testing.T) { @@ -140,7 +142,7 @@ func TestAWSMachineReconcilerIntegrationTests(t *testing.T) { g.Expect(err).To(BeNil()) ms.Machine.Spec.Bootstrap.DataSecretName = aws.String("bootstrap-data") - ms.Machine.Spec.Version = aws.String("test") + ms.Machine.Spec.Version = "test" ms.AWSMachine.Spec.Subnet = &infrav1.AWSResourceReference{ID: aws.String("subnet-1")} ms.AWSMachine.Status.InstanceState = &infrav1.InstanceStateRunning ms.Machine.Labels = map[string]string{clusterv1.MachineControlPlaneLabel: ""} @@ -241,8 +243,8 @@ func TestAWSMachineReconcilerIntegrationTests(t *testing.T) { _, err = reconciler.reconcileDelete(context.TODO(), ms, cs, cs, cs, cs) g.Expect(err).To(BeNil()) expectConditions(g, ms.AWSMachine, []conditionAssertion{ - {infrav1.InstanceReadyCondition, corev1.ConditionFalse, clusterv1.ConditionSeverityInfo, clusterv1.DeletedReason}, 
- {infrav1.ELBAttachedCondition, corev1.ConditionFalse, clusterv1.ConditionSeverityInfo, clusterv1.DeletedReason}, + {infrav1.InstanceReadyCondition, corev1.ConditionFalse, clusterv1beta1.ConditionSeverityInfo, clusterv1beta1.DeletedReason}, + {infrav1.ELBAttachedCondition, corev1.ConditionFalse, clusterv1beta1.ConditionSeverityInfo, clusterv1beta1.DeletedReason}, }) g.Expect(ms.AWSMachine.Finalizers).ShouldNot(ContainElement(infrav1.MachineFinalizer)) }) @@ -320,7 +322,7 @@ func TestAWSMachineReconcilerIntegrationTests(t *testing.T) { g.Expect(err).To(BeNil()) ms.Machine.Spec.Bootstrap.DataSecretName = aws.String("bootstrap-data") - ms.Machine.Spec.Version = aws.String("test") + ms.Machine.Spec.Version = "test" ms.AWSMachine.Spec.Subnet = &infrav1.AWSResourceReference{ID: aws.String("subnet-1")} ms.AWSMachine.Status.InstanceState = &infrav1.InstanceStateRunning ms.Machine.Labels = map[string]string{clusterv1.MachineControlPlaneLabel: ""} @@ -422,11 +424,110 @@ func TestAWSMachineReconcilerIntegrationTests(t *testing.T) { _, err = reconciler.reconcileDelete(context.TODO(), ms, cs, cs, cs, cs) g.Expect(err).Should(HaveOccurred()) expectConditions(g, ms.AWSMachine, []conditionAssertion{ - {infrav1.InstanceReadyCondition, corev1.ConditionFalse, clusterv1.ConditionSeverityWarning, "DeletingFailed"}, - {infrav1.ELBAttachedCondition, corev1.ConditionFalse, clusterv1.ConditionSeverityInfo, clusterv1.DeletedReason}, + {infrav1.InstanceReadyCondition, corev1.ConditionFalse, clusterv1beta1.ConditionSeverityWarning, "DeletingFailed"}, + {infrav1.ELBAttachedCondition, corev1.ConditionFalse, clusterv1beta1.ConditionSeverityInfo, clusterv1beta1.DeletedReason}, }) g.Expect(ms.AWSMachine.Finalizers).ShouldNot(ContainElement(infrav1.MachineFinalizer)) }) + t.Run("Should successfully continue AWSMachinePool machine deletion if spec.cloudInit=={}", func(t *testing.T) { + g := NewWithT(t) + mockCtrl = gomock.NewController(t) + ec2Mock := mocks.NewMockEC2API(mockCtrl) + + // Simulate terminated instance + ec2Mock.EXPECT().DescribeInstances(context.TODO(), gomock.Eq(&ec2.DescribeInstancesInput{ + InstanceIds: []string{"myMachine"}, + })).Return(&ec2.DescribeInstancesOutput{ + Reservations: []ec2types.Reservation{{Instances: []ec2types.Instance{{Placement: &ec2types.Placement{AvailabilityZone: aws.String("us-east-1a")}, InstanceId: aws.String("i-mymachine"), State: &ec2types.InstanceState{Name: ec2types.InstanceStateNameTerminated, Code: aws.Int32(48)}}}}}, + }, nil) + + ns, err := testEnv.CreateNamespace(ctx, fmt.Sprintf("integ-test-%s", util.RandomString(5))) + g.Expect(err).To(BeNil()) + + setup(t, g) + awsMachine := &infrav1.AWSMachine{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: ns.Name, + GenerateName: "mypool-", + Labels: map[string]string{ + clusterv1.MachinePoolNameLabel: "mypool", + clusterv1.ClusterNameLabel: "test-cluster", + }, + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: expinfrav1.GroupVersion.String(), + Kind: "AWSMachinePool", + Name: "mypool", + BlockOwnerDeletion: ptr.To(true), + UID: "6d1e6238-045d-4297-8c7e-73df7a5cc998", + }, + }, + }, + Spec: infrav1.AWSMachineSpec{ + ProviderID: aws.String(providerID), + InstanceID: aws.String("i-mymachine"), + AMI: infrav1.AMIReference{ + ID: aws.String("ami-alsodoesntmatter"), + }, + InstanceType: "foo", + PublicIP: aws.Bool(false), + SSHKeyName: aws.String("foo"), + InstanceMetadataOptions: &infrav1.InstanceMetadataOptions{ + // ... 
+ }, + IAMInstanceProfile: "foo", + AdditionalSecurityGroups: nil, + Subnet: &infrav1.AWSResourceReference{ID: aws.String("sub-doesntmatter")}, + RootVolume: &infrav1.Volume{ + Size: 8, + // ... + }, + NonRootVolumes: nil, + NetworkInterfaces: []string{"eni-foobar"}, + CloudInit: infrav1.CloudInit{}, + SpotMarketOptions: nil, + Tenancy: "host", + }, + } + createAWSMachine(g, awsMachine) + + defer teardown(g) + defer t.Cleanup(func() { + g.Expect(testEnv.Cleanup(ctx, awsMachine, ns)).To(Succeed()) + }) + + cs, err := getClusterScope(infrav1.AWSCluster{ObjectMeta: metav1.ObjectMeta{Name: "test"}}) + g.Expect(err).To(BeNil()) + cs.Cluster = &clusterv1.Cluster{ObjectMeta: metav1.ObjectMeta{Name: "test-cluster"}} + ms, err := getMachineScope(cs, awsMachine) + g.Expect(err).To(BeNil()) + + // This case happened in a live object. It didn't get defaulted and actually was + // a machine pool AWSMachine managed via Ignition. The AWSMachine controller must + // not try to use this field or delete bootstrap data, as the object is managed + // by the AWSMachinePool controller. + ms.AWSMachine.Spec.CloudInit.SecureSecretsBackend = "" + now := metav1.Now() + ms.AWSMachine.DeletionTimestamp = &now + ms.AWSMachine.Status.InstanceState = &infrav1.InstanceStateTerminated + + // Machine pool controlled Machine/AWSMachine + if ms.Machine.Labels == nil { + ms.Machine.Labels = map[string]string{} + } + ms.Machine.Labels[clusterv1.MachinePoolNameLabel] = ms.AWSMachine.Labels[clusterv1.MachinePoolNameLabel] + ms.Machine.Labels[clusterv1.ClusterNameLabel] = ms.AWSMachine.Labels[clusterv1.ClusterNameLabel] + + ec2Svc := ec2Service.NewService(cs) + ec2Svc.EC2Client = ec2Mock + reconciler.ec2ServiceFactory = func(scope scope.EC2Scope) services.EC2Interface { + return ec2Svc + } + + _, err = reconciler.reconcileDelete(context.TODO(), ms, cs, cs, cs, cs) + g.Expect(err).To(BeNil()) + g.Expect(ms.AWSMachine.Finalizers).ShouldNot(ContainElement(infrav1.MachineFinalizer)) + }) } func getMachineScope(cs *scope.ClusterScope, awsMachine *infrav1.AWSMachine) (*scope.MachineScope, error) { @@ -438,7 +539,9 @@ func getMachineScope(cs *scope.ClusterScope, awsMachine *infrav1.AWSMachine) (*s Name: "test", }, Status: clusterv1.ClusterStatus{ - InfrastructureReady: true, + Initialization: clusterv1.ClusterInitializationStatus{ + InfrastructureProvisioned: ptr.To(true), + }, }, }, Machine: &clusterv1.Machine{ @@ -528,16 +631,16 @@ func (p *pointsTo) String() string { } type conditionAssertion struct { - conditionType clusterv1.ConditionType + conditionType clusterv1beta1.ConditionType status corev1.ConditionStatus - severity clusterv1.ConditionSeverity + severity clusterv1beta1.ConditionSeverity reason string } func expectConditions(g *WithT, m *infrav1.AWSMachine, expected []conditionAssertion) { g.Expect(len(m.Status.Conditions)).To(BeNumerically(">=", len(expected)), "number of conditions") for _, c := range expected { - actual := conditions.Get(m, c.conditionType) + actual := v1beta1conditions.Get(m, c.conditionType) g.Expect(actual).To(Not(BeNil())) g.Expect(actual.Type).To(Equal(c.conditionType)) g.Expect(actual.Status).To(Equal(c.status)) diff --git a/controllers/awsmachine_controller_unit_test.go b/controllers/awsmachine_controller_unit_test.go index e5e9827bdd..072c2a5e65 100644 --- a/controllers/awsmachine_controller_unit_test.go +++ b/controllers/awsmachine_controller_unit_test.go @@ -52,8 +52,9 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/mock_services" 
"sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger" "sigs.k8s.io/cluster-api-provider-aws/v2/test/mocks" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - kubeadmv1beta1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1" + kubeadmv1beta1 "sigs.k8s.io/cluster-api/api/controlplane/kubeadm/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/cluster-api/util" ) @@ -115,7 +116,9 @@ func TestAWSMachineReconciler(t *testing.T) { Name: "test", }, Status: clusterv1.ClusterStatus{ - InfrastructureReady: true, + Initialization: clusterv1.ClusterInitializationStatus{ + InfrastructureProvisioned: ptr.To(true), + }, }, }, Machine: &clusterv1.Machine{ @@ -124,6 +127,11 @@ func TestAWSMachineReconciler(t *testing.T) { }, Spec: clusterv1.MachineSpec{ ClusterName: "capi-test", + InfrastructureRef: clusterv1.ContractVersionedObjectReference{ + Kind: "AWSCluster", + Name: "aws-cluster", + APIGroup: infrav1.GroupVersion.Group, + }, Bootstrap: clusterv1.Bootstrap{ DataSecretName: ptr.To[string]("bootstrap-data"), }, @@ -155,12 +163,19 @@ func TestAWSMachineReconciler(t *testing.T) { Client: client, Cluster: &clusterv1.Cluster{ Status: clusterv1.ClusterStatus{ - InfrastructureReady: true, + Initialization: clusterv1.ClusterInitializationStatus{ + InfrastructureProvisioned: ptr.To(true), + }, }, }, Machine: &clusterv1.Machine{ Spec: clusterv1.MachineSpec{ ClusterName: "capi-test", + InfrastructureRef: clusterv1.ContractVersionedObjectReference{ + Kind: "AWSCluster", + Name: "aws-cluster", + APIGroup: infrav1.GroupVersion.Group, + }, Bootstrap: clusterv1.Bootstrap{ DataSecretName: ptr.To[string]("bootstrap-data"), }, @@ -232,7 +247,7 @@ func TestAWSMachineReconciler(t *testing.T) { setup(t, g, awsMachine) defer teardown(t, g) runningInstance(t, g) - ms.Cluster.Status.InfrastructureReady = false + ms.Cluster.Status.Initialization.InfrastructureProvisioned = ptr.To(false) buf := new(bytes.Buffer) klog.SetOutput(buf) @@ -240,7 +255,7 @@ func TestAWSMachineReconciler(t *testing.T) { _, err := reconciler.reconcileNormal(context.Background(), ms, cs, cs, cs, cs) g.Expect(err).To(BeNil()) g.Expect(buf.String()).To(ContainSubstring("Cluster infrastructure is not ready yet")) - expectConditions(g, ms.AWSMachine, []conditionAssertion{{infrav1.InstanceReadyCondition, corev1.ConditionFalse, clusterv1.ConditionSeverityInfo, infrav1.WaitingForClusterInfrastructureReason}}) + expectConditions(g, ms.AWSMachine, []conditionAssertion{{infrav1.InstanceReadyCondition, corev1.ConditionFalse, clusterv1beta1.ConditionSeverityInfo, infrav1.WaitingForClusterInfrastructureReason}}) }) t.Run("should exit immediately if bootstrap data secret reference isn't available", func(t *testing.T) { @@ -258,7 +273,7 @@ func TestAWSMachineReconciler(t *testing.T) { g.Expect(err).To(BeNil()) g.Expect(buf.String()).To(ContainSubstring("Bootstrap data secret reference is not yet available")) - expectConditions(g, ms.AWSMachine, []conditionAssertion{{infrav1.InstanceReadyCondition, corev1.ConditionFalse, clusterv1.ConditionSeverityInfo, infrav1.WaitingForBootstrapDataReason}}) + expectConditions(g, ms.AWSMachine, []conditionAssertion{{infrav1.InstanceReadyCondition, corev1.ConditionFalse, clusterv1beta1.ConditionSeverityInfo, infrav1.WaitingForBootstrapDataReason}}) }) t.Run("should return an error when we can't list instances by tags", func(t *testing.T) { @@ -393,7 +408,7 @@ func TestAWSMachineReconciler(t *testing.T) { 
g.Expect(ms.AWSMachine.Status.Ready).To(BeFalse()) g.Expect(buf.String()).To(ContainSubstring("EC2 instance state changed")) - expectConditions(g, ms.AWSMachine, []conditionAssertion{{infrav1.InstanceReadyCondition, corev1.ConditionFalse, clusterv1.ConditionSeverityWarning, infrav1.InstanceNotReadyReason}}) + expectConditions(g, ms.AWSMachine, []conditionAssertion{{infrav1.InstanceReadyCondition, corev1.ConditionFalse, clusterv1beta1.ConditionSeverityWarning, infrav1.InstanceNotReadyReason}}) }) t.Run("should set instance to running", func(t *testing.T) { @@ -574,7 +589,7 @@ func TestAWSMachineReconciler(t *testing.T) { g.Expect(ms.AWSMachine.Status.InstanceState).To(PointTo(Equal(infrav1.InstanceStateStopping))) g.Expect(ms.AWSMachine.Status.Ready).To(BeFalse()) g.Expect(buf.String()).To(ContainSubstring("EC2 instance state changed")) - expectConditions(g, ms.AWSMachine, []conditionAssertion{{infrav1.InstanceReadyCondition, corev1.ConditionFalse, clusterv1.ConditionSeverityError, infrav1.InstanceStoppedReason}}) + expectConditions(g, ms.AWSMachine, []conditionAssertion{{infrav1.InstanceReadyCondition, corev1.ConditionFalse, clusterv1beta1.ConditionSeverityError, infrav1.InstanceStoppedReason}}) }) t.Run("should then set instance to stopped and unready", func(t *testing.T) { @@ -590,7 +605,7 @@ func TestAWSMachineReconciler(t *testing.T) { g.Expect(ms.AWSMachine.Status.InstanceState).To(PointTo(Equal(infrav1.InstanceStateStopped))) g.Expect(ms.AWSMachine.Status.Ready).To(BeFalse()) g.Expect(buf.String()).To(ContainSubstring("EC2 instance state changed")) - expectConditions(g, ms.AWSMachine, []conditionAssertion{{infrav1.InstanceReadyCondition, corev1.ConditionFalse, clusterv1.ConditionSeverityError, infrav1.InstanceStoppedReason}}) + expectConditions(g, ms.AWSMachine, []conditionAssertion{{infrav1.InstanceReadyCondition, corev1.ConditionFalse, clusterv1beta1.ConditionSeverityError, infrav1.InstanceStoppedReason}}) }) t.Run("should then set instance to running and ready once it is restarted", func(t *testing.T) { @@ -648,7 +663,7 @@ func TestAWSMachineReconciler(t *testing.T) { g.Expect(buf.String()).To(ContainSubstring("Unexpected EC2 instance termination")) g.Eventually(recorder.Events).Should(Receive(ContainSubstring("UnexpectedTermination"))) g.Expect(ms.AWSMachine.Status.FailureMessage).To(PointTo(Equal("EC2 instance state \"terminated\" is unexpected"))) - expectConditions(g, ms.AWSMachine, []conditionAssertion{{infrav1.InstanceReadyCondition, corev1.ConditionFalse, clusterv1.ConditionSeverityError, infrav1.InstanceTerminatedReason}}) + expectConditions(g, ms.AWSMachine, []conditionAssertion{{infrav1.InstanceReadyCondition, corev1.ConditionFalse, clusterv1beta1.ConditionSeverityError, infrav1.InstanceTerminatedReason}}) }) }) t.Run("should not register if control plane ELB is already registered", func(t *testing.T) { @@ -674,7 +689,7 @@ func TestAWSMachineReconciler(t *testing.T) { _, err := reconciler.reconcileNormal(context.Background(), ms, cs, cs, cs, cs) g.Expect(err).To(BeNil()) g.Expect(ms.AWSMachine.Finalizers).To(ContainElement(infrav1.MachineFinalizer)) - expectConditions(g, ms.AWSMachine, []conditionAssertion{{infrav1.InstanceReadyCondition, corev1.ConditionFalse, clusterv1.ConditionSeverityWarning, infrav1.InstanceNotReadyReason}}) + expectConditions(g, ms.AWSMachine, []conditionAssertion{{infrav1.InstanceReadyCondition, corev1.ConditionFalse, clusterv1beta1.ConditionSeverityWarning, infrav1.InstanceNotReadyReason}}) }) t.Run("should attach control plane ELB to instance", 
func(t *testing.T) { g := NewWithT(t) @@ -701,7 +716,7 @@ func TestAWSMachineReconciler(t *testing.T) { g.Expect(err).To(BeNil()) g.Expect(ms.AWSMachine.Finalizers).To(ContainElement(infrav1.MachineFinalizer)) expectConditions(g, ms.AWSMachine, []conditionAssertion{{infrav1.ELBAttachedCondition, corev1.ConditionTrue, "", ""}}) - expectConditions(g, ms.AWSMachine, []conditionAssertion{{infrav1.InstanceReadyCondition, corev1.ConditionFalse, clusterv1.ConditionSeverityWarning, infrav1.InstanceNotReadyReason}}) + expectConditions(g, ms.AWSMachine, []conditionAssertion{{infrav1.InstanceReadyCondition, corev1.ConditionFalse, clusterv1beta1.ConditionSeverityWarning, infrav1.InstanceNotReadyReason}}) }) t.Run("should store userdata for CloudInit using AWS Secrets Manager only when not skipped", func(t *testing.T) { g := NewWithT(t) @@ -721,7 +736,7 @@ func TestAWSMachineReconciler(t *testing.T) { _, err := reconciler.reconcileNormal(context.Background(), ms, cs, cs, cs, cs) g.Expect(err).To(BeNil()) - expectConditions(g, ms.AWSMachine, []conditionAssertion{{infrav1.InstanceReadyCondition, corev1.ConditionFalse, clusterv1.ConditionSeverityWarning, infrav1.InstanceNotReadyReason}}) + expectConditions(g, ms.AWSMachine, []conditionAssertion{{infrav1.InstanceReadyCondition, corev1.ConditionFalse, clusterv1beta1.ConditionSeverityWarning, infrav1.InstanceNotReadyReason}}) g.Expect(ms.AWSMachine.Finalizers).To(ContainElement(infrav1.MachineFinalizer)) }) t.Run("should fail to delete bootstrap data secret if AWSMachine state is updated", func(t *testing.T) { @@ -730,9 +745,8 @@ func TestAWSMachineReconciler(t *testing.T) { setup(t, g, awsMachine) defer teardown(t, g) instanceCreate(t, g) - ms.Machine.Status.NodeRef = &corev1.ObjectReference{ - Namespace: "default", - Name: "test", + ms.Machine.Status.NodeRef = clusterv1.MachineNodeReference{ + Name: "test", } secretSvc.EXPECT().UserData(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, nil).Times(1) @@ -740,7 +754,7 @@ func TestAWSMachineReconciler(t *testing.T) { secretSvc.EXPECT().Delete(gomock.Any()).Return(errors.New("failed to delete entries from AWS Secret")).Times(1) _, err := reconciler.reconcileNormal(context.Background(), ms, cs, cs, cs, cs) - expectConditions(g, ms.AWSMachine, []conditionAssertion{{infrav1.InstanceReadyCondition, corev1.ConditionFalse, clusterv1.ConditionSeverityWarning, infrav1.InstanceNotReadyReason}}) + expectConditions(g, ms.AWSMachine, []conditionAssertion{{infrav1.InstanceReadyCondition, corev1.ConditionFalse, clusterv1beta1.ConditionSeverityWarning, infrav1.InstanceNotReadyReason}}) g.Expect(err).To(MatchError(ContainSubstring("failed to delete entries from AWS Secret"))) }) }) @@ -771,7 +785,7 @@ func TestAWSMachineReconciler(t *testing.T) { g.Expect(err.Error()).To(ContainSubstring(expectedError)) g.Expect(ms.AWSMachine.Finalizers).To(ContainElement(infrav1.MachineFinalizer)) - expectConditions(g, ms.AWSMachine, []conditionAssertion{{infrav1.InstanceReadyCondition, corev1.ConditionFalse, clusterv1.ConditionSeverityError, infrav1.InstanceProvisionFailedReason}}) + expectConditions(g, ms.AWSMachine, []conditionAssertion{{infrav1.InstanceReadyCondition, corev1.ConditionFalse, clusterv1beta1.ConditionSeverityError, infrav1.InstanceProvisionFailedReason}}) }) t.Run("should fail to determine the registration status of control plane ELB", func(t *testing.T) { g := NewWithT(t) @@ -797,7 +811,7 @@ func TestAWSMachineReconciler(t *testing.T) { g.Expect(err.Error()).To(ContainSubstring("error describing ELB")) 
g.Expect(ms.AWSMachine.Finalizers).To(ContainElement(infrav1.MachineFinalizer)) g.Eventually(recorder.Events).Should(Receive(ContainSubstring("FailedAttachControlPlaneELB"))) - expectConditions(g, ms.AWSMachine, []conditionAssertion{{infrav1.InstanceReadyCondition, corev1.ConditionFalse, clusterv1.ConditionSeverityWarning, infrav1.InstanceNotReadyReason}}) + expectConditions(g, ms.AWSMachine, []conditionAssertion{{infrav1.InstanceReadyCondition, corev1.ConditionFalse, clusterv1beta1.ConditionSeverityWarning, infrav1.InstanceNotReadyReason}}) }) t.Run("should fail to attach control plane ELB to instance", func(t *testing.T) { g := NewWithT(t) @@ -823,7 +837,7 @@ func TestAWSMachineReconciler(t *testing.T) { g.Expect(err).ToNot(BeNil()) g.Expect(err.Error()).To(ContainSubstring("failed to attach ELB")) g.Eventually(recorder.Events).Should(Receive(ContainSubstring("FailedAttachControlPlaneELB"))) - expectConditions(g, ms.AWSMachine, []conditionAssertion{{infrav1.InstanceReadyCondition, corev1.ConditionFalse, clusterv1.ConditionSeverityWarning, infrav1.InstanceNotReadyReason}}) + expectConditions(g, ms.AWSMachine, []conditionAssertion{{infrav1.InstanceReadyCondition, corev1.ConditionFalse, clusterv1beta1.ConditionSeverityWarning, infrav1.InstanceNotReadyReason}}) g.Expect(ms.AWSMachine.Finalizers).To(ContainElement(infrav1.MachineFinalizer)) }) t.Run("should fail to delete bootstrap data secret if AWSMachine is in failed state", func(t *testing.T) { @@ -862,7 +876,7 @@ func TestAWSMachineReconciler(t *testing.T) { _, err := reconciler.reconcileNormal(context.Background(), ms, cs, cs, cs, cs) g.Expect(err.Error()).To(ContainSubstring("json: cannot unmarshal number into Go value of type map[string]interface {}")) g.Expect(ms.AWSMachine.Finalizers).To(ContainElement(infrav1.MachineFinalizer)) - expectConditions(g, ms.AWSMachine, []conditionAssertion{{infrav1.InstanceReadyCondition, corev1.ConditionFalse, clusterv1.ConditionSeverityWarning, infrav1.InstanceNotReadyReason}}) + expectConditions(g, ms.AWSMachine, []conditionAssertion{{infrav1.InstanceReadyCondition, corev1.ConditionFalse, clusterv1beta1.ConditionSeverityWarning, infrav1.InstanceNotReadyReason}}) }) t.Run("Should fail to update resource tags after instance is created", func(t *testing.T) { g := NewWithT(t) @@ -881,7 +895,7 @@ func TestAWSMachineReconciler(t *testing.T) { _, err := reconciler.reconcileNormal(context.Background(), ms, cs, cs, cs, cs) g.Expect(err).ToNot(BeNil()) g.Expect(ms.AWSMachine.Finalizers).To(ContainElement(infrav1.MachineFinalizer)) - expectConditions(g, ms.AWSMachine, []conditionAssertion{{infrav1.InstanceReadyCondition, corev1.ConditionFalse, clusterv1.ConditionSeverityWarning, infrav1.InstanceNotReadyReason}}) + expectConditions(g, ms.AWSMachine, []conditionAssertion{{infrav1.InstanceReadyCondition, corev1.ConditionFalse, clusterv1beta1.ConditionSeverityWarning, infrav1.InstanceNotReadyReason}}) }) }) t.Run("While ensuring SecurityGroups", func(t *testing.T) { @@ -912,7 +926,7 @@ func TestAWSMachineReconciler(t *testing.T) { _, err := reconciler.reconcileNormal(context.Background(), ms, cs, cs, cs, cs) g.Expect(err).ToNot(BeNil()) g.Expect(ms.AWSMachine.Finalizers).To(ContainElement(infrav1.MachineFinalizer)) - expectConditions(g, ms.AWSMachine, []conditionAssertion{{infrav1.SecurityGroupsReadyCondition, corev1.ConditionFalse, clusterv1.ConditionSeverityError, infrav1.SecurityGroupsFailedReason}}) + expectConditions(g, ms.AWSMachine, []conditionAssertion{{infrav1.SecurityGroupsReadyCondition, 
corev1.ConditionFalse, clusterv1beta1.ConditionSeverityError, infrav1.SecurityGroupsFailedReason}}) }) t.Run("Should fail to fetch core security groups", func(t *testing.T) { g := NewWithT(t) @@ -930,7 +944,7 @@ func TestAWSMachineReconciler(t *testing.T) { _, err := reconciler.reconcileNormal(context.Background(), ms, cs, cs, cs, cs) g.Expect(err).ToNot(BeNil()) g.Expect(ms.AWSMachine.Finalizers).To(ContainElement(infrav1.MachineFinalizer)) - expectConditions(g, ms.AWSMachine, []conditionAssertion{{infrav1.SecurityGroupsReadyCondition, corev1.ConditionFalse, clusterv1.ConditionSeverityError, infrav1.SecurityGroupsFailedReason}}) + expectConditions(g, ms.AWSMachine, []conditionAssertion{{infrav1.SecurityGroupsReadyCondition, corev1.ConditionFalse, clusterv1beta1.ConditionSeverityError, infrav1.SecurityGroupsFailedReason}}) }) t.Run("Should fail if ensureSecurityGroups fails to fetch additional security groups", func(t *testing.T) { g := NewWithT(t) @@ -960,7 +974,7 @@ func TestAWSMachineReconciler(t *testing.T) { _, err := reconciler.reconcileNormal(context.Background(), ms, cs, cs, cs, cs) g.Expect(err).ToNot(BeNil()) g.Expect(ms.AWSMachine.Finalizers).To(ContainElement(infrav1.MachineFinalizer)) - expectConditions(g, ms.AWSMachine, []conditionAssertion{{infrav1.SecurityGroupsReadyCondition, corev1.ConditionFalse, clusterv1.ConditionSeverityError, infrav1.SecurityGroupsFailedReason}}) + expectConditions(g, ms.AWSMachine, []conditionAssertion{{infrav1.SecurityGroupsReadyCondition, corev1.ConditionFalse, clusterv1beta1.ConditionSeverityError, infrav1.SecurityGroupsFailedReason}}) }) t.Run("Should fail to update security group", func(t *testing.T) { g := NewWithT(t) @@ -991,7 +1005,7 @@ func TestAWSMachineReconciler(t *testing.T) { _, err := reconciler.reconcileNormal(context.Background(), ms, cs, cs, cs, cs) g.Expect(err).ToNot(BeNil()) g.Expect(ms.AWSMachine.Finalizers).To(ContainElement(infrav1.MachineFinalizer)) - expectConditions(g, ms.AWSMachine, []conditionAssertion{{infrav1.SecurityGroupsReadyCondition, corev1.ConditionFalse, clusterv1.ConditionSeverityError, infrav1.SecurityGroupsFailedReason}}) + expectConditions(g, ms.AWSMachine, []conditionAssertion{{infrav1.SecurityGroupsReadyCondition, corev1.ConditionFalse, clusterv1beta1.ConditionSeverityError, infrav1.SecurityGroupsFailedReason}}) }) }) }) @@ -1071,10 +1085,8 @@ func TestAWSMachineReconciler(t *testing.T) { ID: "myMachine", } - ms.Machine.Status.NodeRef = &corev1.ObjectReference{ - Kind: "Node", - Name: "myMachine", - APIVersion: "v1", + ms.Machine.Status.NodeRef = clusterv1.MachineNodeReference{ + Name: "myMachine", } ms.AWSMachine.Spec.CloudInit = infrav1.CloudInit{ @@ -1416,10 +1428,8 @@ func TestAWSMachineReconciler(t *testing.T) { ID: "myMachine", } - ms.Machine.Status.NodeRef = &corev1.ObjectReference{ - Kind: "Node", - Name: "myMachine", - APIVersion: "v1", + ms.Machine.Status.NodeRef = clusterv1.MachineNodeReference{ + Name: "myMachine", } ec2Svc.EXPECT().GetRunningInstanceByTags(gomock.Any()).Return(instance, nil).AnyTimes() @@ -1839,7 +1849,7 @@ func TestAWSMachineReconciler(t *testing.T) { g.Expect(err.Error()).To(ContainSubstring("error describing ELB")) g.Expect(ms.AWSMachine.Finalizers).To(ContainElement(metav1.FinalizerDeleteDependents)) g.Eventually(recorder.Events).Should(Receive(ContainSubstring("FailedDetachControlPlaneELB"))) - expectConditions(g, ms.AWSMachine, []conditionAssertion{{infrav1.ELBAttachedCondition, corev1.ConditionFalse, clusterv1.ConditionSeverityWarning, "DeletingFailed"}}) + 
expectConditions(g, ms.AWSMachine, []conditionAssertion{{infrav1.ELBAttachedCondition, corev1.ConditionFalse, clusterv1beta1.ConditionSeverityWarning, "DeletingFailed"}}) }) t.Run("should not do anything if control plane ELB is already detached from instance", func(t *testing.T) { @@ -1862,7 +1872,7 @@ func TestAWSMachineReconciler(t *testing.T) { _, err := reconciler.reconcileDelete(context.TODO(), ms, cs, cs, cs, cs) g.Expect(err).To(BeNil()) g.Expect(ms.AWSMachine.Finalizers).To(ContainElement(metav1.FinalizerDeleteDependents)) - expectConditions(g, ms.AWSMachine, []conditionAssertion{{infrav1.ELBAttachedCondition, corev1.ConditionFalse, clusterv1.ConditionSeverityInfo, clusterv1.DeletedReason}}) + expectConditions(g, ms.AWSMachine, []conditionAssertion{{infrav1.ELBAttachedCondition, corev1.ConditionFalse, clusterv1beta1.ConditionSeverityInfo, clusterv1beta1.DeletedReason}}) }) }) }) @@ -1888,7 +1898,7 @@ func TestAWSMachineReconciler(t *testing.T) { _, err := reconciler.reconcileDelete(context.TODO(), ms, cs, cs, cs, cs) g.Expect(err).To(BeNil()) g.Expect(ms.AWSMachine.Finalizers).To(ContainElement(metav1.FinalizerDeleteDependents)) - expectConditions(g, ms.AWSMachine, []conditionAssertion{{infrav1.ELBAttachedCondition, corev1.ConditionFalse, clusterv1.ConditionSeverityInfo, clusterv1.DeletedReason}}) + expectConditions(g, ms.AWSMachine, []conditionAssertion{{infrav1.ELBAttachedCondition, corev1.ConditionFalse, clusterv1beta1.ConditionSeverityInfo, clusterv1beta1.DeletedReason}}) }) t.Run("should fail to detach control plane ELB from instance", func(t *testing.T) { g := NewWithT(t) @@ -1912,7 +1922,7 @@ func TestAWSMachineReconciler(t *testing.T) { g.Expect(err).ToNot(BeNil()) g.Expect(err.Error()).To(ContainSubstring("Duplicate access point name for load balancer")) g.Expect(ms.AWSMachine.Finalizers).To(ContainElement(metav1.FinalizerDeleteDependents)) - expectConditions(g, ms.AWSMachine, []conditionAssertion{{infrav1.ELBAttachedCondition, corev1.ConditionFalse, clusterv1.ConditionSeverityWarning, "DeletingFailed"}}) + expectConditions(g, ms.AWSMachine, []conditionAssertion{{infrav1.ELBAttachedCondition, corev1.ConditionFalse, clusterv1beta1.ConditionSeverityWarning, "DeletingFailed"}}) }) t.Run("should fail if secretPrefix present, but secretCount is not set", func(t *testing.T) { g := NewWithT(t) @@ -1966,8 +1976,17 @@ func TestAWSMachineReconcilerAWSClusterToAWSMachines(t *testing.T) { requests []reconcile.Request }{ { - name: "Should create reconcile request successfully", - ownerCluster: &clusterv1.Cluster{ObjectMeta: metav1.ObjectMeta{Name: "capi-test-6"}}, + name: "Should create reconcile request successfully", + ownerCluster: &clusterv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{Name: "capi-test-6"}, + Spec: clusterv1.ClusterSpec{ + InfrastructureRef: clusterv1.ContractVersionedObjectReference{ + Kind: "AWSCluster", + Name: "aws-cluster-6", + APIGroup: infrav1.GroupVersion.Group, + }, + }, + }, awsMachine: &clusterv1.Machine{ ObjectMeta: metav1.ObjectMeta{ Name: "aws-test-6", @@ -1977,10 +1996,17 @@ func TestAWSMachineReconcilerAWSClusterToAWSMachines(t *testing.T) { }, Spec: clusterv1.MachineSpec{ ClusterName: "capi-test", - InfrastructureRef: corev1.ObjectReference{ - Kind: "AWSMachine", - Name: "aws-machine-6", - APIVersion: infrav1.GroupVersion.String(), + InfrastructureRef: clusterv1.ContractVersionedObjectReference{ + Kind: "AWSMachine", + Name: "aws-machine-6", + APIGroup: infrav1.GroupVersion.Group, + }, + Bootstrap: clusterv1.Bootstrap{ + ConfigRef: 
clusterv1.ContractVersionedObjectReference{ + Name: "capi-test-6-config", + Kind: "EKSConfig", + APIGroup: clusterv1.GroupVersion.Group, + }, }, }, }, @@ -2006,8 +2032,17 @@ func TestAWSMachineReconcilerAWSClusterToAWSMachines(t *testing.T) { }, }, { - name: "Should not create reconcile request for deleted clusters", - ownerCluster: &clusterv1.Cluster{ObjectMeta: metav1.ObjectMeta{Name: "capi-test-1", DeletionTimestamp: &metav1.Time{Time: time.Now()}}}, + name: "Should not create reconcile request for deleted clusters", + ownerCluster: &clusterv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{Name: "capi-test-1", DeletionTimestamp: &metav1.Time{Time: time.Now()}}, + Spec: clusterv1.ClusterSpec{ + InfrastructureRef: clusterv1.ContractVersionedObjectReference{ + Kind: "AWSCluster", + Name: "aws-cluster-1", + APIGroup: infrav1.GroupVersion.Group, + }, + }, + }, awsMachine: &clusterv1.Machine{ ObjectMeta: metav1.ObjectMeta{ Labels: map[string]string{ @@ -2017,10 +2052,17 @@ func TestAWSMachineReconcilerAWSClusterToAWSMachines(t *testing.T) { }, Spec: clusterv1.MachineSpec{ ClusterName: "capi-test", - InfrastructureRef: corev1.ObjectReference{ - Kind: "AWSMachine", - Name: "aws-machine-1", - APIVersion: infrav1.GroupVersion.String(), + InfrastructureRef: clusterv1.ContractVersionedObjectReference{ + Kind: "AWSMachine", + Name: "aws-machine-1", + APIGroup: infrav1.GroupVersion.Group, + }, + Bootstrap: clusterv1.Bootstrap{ + ConfigRef: clusterv1.ContractVersionedObjectReference{ + Name: "capi-test-1-config", + Kind: "EKSConfig", + APIGroup: clusterv1.GroupVersion.Group, + }, }, }, }, @@ -2049,10 +2091,17 @@ func TestAWSMachineReconcilerAWSClusterToAWSMachines(t *testing.T) { }, Spec: clusterv1.MachineSpec{ ClusterName: "capi-test", - InfrastructureRef: corev1.ObjectReference{ - Kind: "AWSMachine", - Name: "aws-machine-2", - APIVersion: infrav1.GroupVersion.String(), + InfrastructureRef: clusterv1.ContractVersionedObjectReference{ + Kind: "AWSMachine", + Name: "aws-machine-2", + APIGroup: infrav1.GroupVersion.Group, + }, + Bootstrap: clusterv1.Bootstrap{ + ConfigRef: clusterv1.ContractVersionedObjectReference{ + Name: "capi-test-2-config", + Kind: "EKSConfig", + APIGroup: clusterv1.GroupVersion.Group, + }, }, }, }, @@ -2070,18 +2119,34 @@ func TestAWSMachineReconcilerAWSClusterToAWSMachines(t *testing.T) { }, }, { - name: "Should not create reconcile request if owned Machines not found", - ownerCluster: &clusterv1.Cluster{ObjectMeta: metav1.ObjectMeta{Name: "capi-test-3"}}, + name: "Should not create reconcile request if owned Machines not found", + ownerCluster: &clusterv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{Name: "capi-test-3"}, + Spec: clusterv1.ClusterSpec{ + InfrastructureRef: clusterv1.ContractVersionedObjectReference{ + Kind: "AWSCluster", + Name: "aws-cluster-3", + APIGroup: infrav1.GroupVersion.Group, + }, + }, + }, awsMachine: &clusterv1.Machine{ ObjectMeta: metav1.ObjectMeta{ Name: "aws-test-3", }, Spec: clusterv1.MachineSpec{ ClusterName: "capi-test", - InfrastructureRef: corev1.ObjectReference{ - Kind: "AWSMachine", - Name: "aws-machine-3", - APIVersion: infrav1.GroupVersion.String(), + InfrastructureRef: clusterv1.ContractVersionedObjectReference{ + Kind: "AWSMachine", + Name: "aws-machine-3", + APIGroup: infrav1.GroupVersion.Group, + }, + Bootstrap: clusterv1.Bootstrap{ + ConfigRef: clusterv1.ContractVersionedObjectReference{ + Name: "capi-test-6-config", + Kind: "EKSConfig", + APIGroup: clusterv1.GroupVersion.Group, + }, }, }, }, @@ -2100,8 +2165,17 @@ func 
TestAWSMachineReconcilerAWSClusterToAWSMachines(t *testing.T) { requests: []reconcile.Request{}, }, { - name: "Should not create reconcile request if owned Machine type is not AWSMachine", - ownerCluster: &clusterv1.Cluster{ObjectMeta: metav1.ObjectMeta{Name: "capi-test-4"}}, + name: "Should not create reconcile request if owned Machine type is not AWSMachine", + ownerCluster: &clusterv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{Name: "capi-test-4"}, + Spec: clusterv1.ClusterSpec{ + InfrastructureRef: clusterv1.ContractVersionedObjectReference{ + Kind: "AWSCluster", + Name: "aws-cluster-4", + APIGroup: infrav1.GroupVersion.Group, + }, + }, + }, awsMachine: &clusterv1.Machine{ ObjectMeta: metav1.ObjectMeta{ Labels: map[string]string{ @@ -2115,10 +2189,17 @@ func TestAWSMachineReconcilerAWSClusterToAWSMachines(t *testing.T) { }, Spec: clusterv1.MachineSpec{ ClusterName: "capi-test", - InfrastructureRef: corev1.ObjectReference{ - Kind: "Machine", - Name: "aws-machine-4", - APIVersion: infrav1.GroupVersion.String(), + InfrastructureRef: clusterv1.ContractVersionedObjectReference{ + Kind: "Machine", + Name: "aws-machine-4", + APIGroup: infrav1.GroupVersion.Group, + }, + Bootstrap: clusterv1.Bootstrap{ + ConfigRef: clusterv1.ContractVersionedObjectReference{ + Name: "capi-test-4-config", + Kind: "EKSConfig", + APIGroup: clusterv1.GroupVersion.Group, + }, }, }, }, @@ -2136,9 +2217,19 @@ func TestAWSMachineReconcilerAWSClusterToAWSMachines(t *testing.T) { }, requests: []reconcile.Request{}, }, + // TODO: this scenario may no longer apply now that the infrastructure ref always carries a name; consider removing this test { - name: "Should not create reconcile request if name for machine in infrastructure ref not found", - ownerCluster: &clusterv1.Cluster{ObjectMeta: metav1.ObjectMeta{Name: "capi-test-5"}}, + name: "Should not create reconcile request if name for machine in infrastructure ref not found", + ownerCluster: &clusterv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{Name: "capi-test-5"}, + Spec: clusterv1.ClusterSpec{ + InfrastructureRef: clusterv1.ContractVersionedObjectReference{ + Kind: "AWSCluster", + Name: "aws-cluster-5", + APIGroup: infrav1.GroupVersion.Group, + }, + }, + }, awsMachine: &clusterv1.Machine{ ObjectMeta: metav1.ObjectMeta{ Name: "aws-test-5", @@ -2148,9 +2239,17 @@ func TestAWSMachineReconcilerAWSClusterToAWSMachines(t *testing.T) { }, Spec: clusterv1.MachineSpec{ ClusterName: "capi-test", - InfrastructureRef: corev1.ObjectReference{ - Kind: "AWSMachine", - APIVersion: infrav1.GroupVersion.String(), + InfrastructureRef: clusterv1.ContractVersionedObjectReference{ + Name: "capi-test-5", + Kind: "AWSMachine", + APIGroup: infrav1.GroupVersion.Group, + }, + Bootstrap: clusterv1.Bootstrap{ + ConfigRef: clusterv1.ContractVersionedObjectReference{ + Name: "capi-test-5-config", + Kind: "EKSConfig", + APIGroup: clusterv1.GroupVersion.Group, + }, }, }, }, @@ -2159,7 +2258,7 @@ func TestAWSMachineReconcilerAWSClusterToAWSMachines(t *testing.T) { Name: "aws-test-5", OwnerReferences: []metav1.OwnerReference{ { - Name: "capi-test-5", + Name: "capi-test-not-5", Kind: "Cluster", APIVersion: clusterv1.GroupVersion.String(), }, @@ -2210,13 +2309,31 @@ func TestAWSMachineReconcilerRequeueAWSMachinesForUnpausedCluster(t *testing.T) requests []reconcile.Request }{ { - name: "Should not create reconcile request for deleted clusters", - ownerCluster: &clusterv1.Cluster{ObjectMeta: metav1.ObjectMeta{Name: "capi-test-1", Namespace: "default", DeletionTimestamp: &metav1.Time{Time: time.Now()}}}, + name: "Should not create reconcile request for deleted clusters", + ownerCluster:
&clusterv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{Name: "capi-test-1", Namespace: "default", DeletionTimestamp: &metav1.Time{Time: time.Now()}}, + Spec: clusterv1.ClusterSpec{ + InfrastructureRef: clusterv1.ContractVersionedObjectReference{ + Kind: "AWSCluster", + Name: "aws-cluster-1", + APIGroup: infrav1.GroupVersion.Group, + }, + }, + }, }, { - name: "Should create reconcile request successfully", - ownerCluster: &clusterv1.Cluster{ObjectMeta: metav1.ObjectMeta{Name: "capi-test-1", Namespace: "default"}}, - requests: []reconcile.Request{}, + name: "Should create reconcile request successfully", + ownerCluster: &clusterv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{Name: "capi-test-1", Namespace: "default"}, + Spec: clusterv1.ClusterSpec{ + InfrastructureRef: clusterv1.ContractVersionedObjectReference{ + Kind: "AWSCluster", + Name: "aws-cluster-1", + APIGroup: infrav1.GroupVersion.Group, + }, + }, + }, + requests: []reconcile.Request{}, }, } for _, tc := range testCases { @@ -2327,10 +2444,31 @@ func TestAWSMachineReconcilerReconcile(t *testing.T) { }, Spec: clusterv1.MachineSpec{ ClusterName: "capi-test", + InfrastructureRef: clusterv1.ContractVersionedObjectReference{ + Kind: "AWSMachine", + Name: "aws-machine-6", + APIGroup: infrav1.GroupVersion.Group, + }, + Bootstrap: clusterv1.Bootstrap{ + ConfigRef: clusterv1.ContractVersionedObjectReference{ + Name: "capi-test-6-config", + Kind: "EKSConfig", + APIGroup: clusterv1.GroupVersion.Group, + }, + }, }, }, - ownerCluster: &clusterv1.Cluster{ObjectMeta: metav1.ObjectMeta{Name: "capi-test-1"}}, - expectError: false, + ownerCluster: &clusterv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{Name: "capi-test-1"}, + Spec: clusterv1.ClusterSpec{ + InfrastructureRef: clusterv1.ContractVersionedObjectReference{ + Kind: "AWSCluster", + Name: "aws-cluster-1", + APIGroup: infrav1.GroupVersion.Group, + }, + }, + }, + expectError: false, }, { name: "Should not Reconcile if cluster is paused", @@ -2355,11 +2493,30 @@ func TestAWSMachineReconcilerReconcile(t *testing.T) { }, Spec: clusterv1.MachineSpec{ ClusterName: "capi-test", + InfrastructureRef: clusterv1.ContractVersionedObjectReference{ + Kind: "AWSMachine", + Name: "aws-machine-6", + APIGroup: infrav1.GroupVersion.Group, + }, + Bootstrap: clusterv1.Bootstrap{ + ConfigRef: clusterv1.ContractVersionedObjectReference{ + Name: "capi-test-6-config", + Kind: "EKSConfig", + APIGroup: clusterv1.GroupVersion.Group, + }, + }, + }, + }, + ownerCluster: &clusterv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{Name: "capi-test-1"}, + Spec: clusterv1.ClusterSpec{ + InfrastructureRef: clusterv1.ContractVersionedObjectReference{ + Kind: "AWSCluster", + Name: "aws-cluster-1", + APIGroup: infrav1.GroupVersion.Group, + }, }, }, - ownerCluster: &clusterv1.Cluster{ObjectMeta: metav1.ObjectMeta{Name: "capi-test-1"}, Spec: clusterv1.ClusterSpec{ - InfrastructureRef: &corev1.ObjectReference{Name: "foo"}, - }}, expectError: false, }, { @@ -2382,14 +2539,36 @@ func TestAWSMachineReconcilerReconcile(t *testing.T) { clusterv1.ClusterNameLabel: "capi-test-1", }, Name: "capi-test-machine", Namespace: "default", - }, Spec: clusterv1.MachineSpec{ + }, + Spec: clusterv1.MachineSpec{ ClusterName: "capi-test", + InfrastructureRef: clusterv1.ContractVersionedObjectReference{ + Kind: "AWSMachine", + Name: "aws-machine-6", + APIGroup: infrav1.GroupVersion.Group, + }, + Bootstrap: clusterv1.Bootstrap{ + ConfigRef: clusterv1.ContractVersionedObjectReference{ + Name: "capi-test-6-config", + Kind: "EKSConfig", + APIGroup: 
clusterv1.GroupVersion.Group, + }, + }, }, }, ownerCluster: &clusterv1.Cluster{ ObjectMeta: metav1.ObjectMeta{Name: "capi-test-1"}, Spec: clusterv1.ClusterSpec{ - ControlPlaneRef: &corev1.ObjectReference{Kind: AWSManagedControlPlaneRefKind}, + ControlPlaneRef: clusterv1.ContractVersionedObjectReference{ + Name: "capi-test-1-cpref", + Kind: AWSManagedControlPlaneRefKind, + APIGroup: clusterv1.GroupVersion.Group, + }, + InfrastructureRef: clusterv1.ContractVersionedObjectReference{ + Kind: "AWSCluster", + Name: "aws-cluster-1", + APIGroup: infrav1.GroupVersion.Group, + }, }, }, expectError: false, @@ -2417,12 +2596,28 @@ func TestAWSMachineReconcilerReconcile(t *testing.T) { }, Spec: clusterv1.MachineSpec{ ClusterName: "capi-test", + InfrastructureRef: clusterv1.ContractVersionedObjectReference{ + Kind: "AWSMachine", + Name: "aws-machine-6", + APIGroup: infrav1.GroupVersion.Group, + }, + Bootstrap: clusterv1.Bootstrap{ + ConfigRef: clusterv1.ContractVersionedObjectReference{ + Name: "capi-test-6-config", + Kind: "EKSConfig", + APIGroup: clusterv1.GroupVersion.Group, + }, + }, }, }, ownerCluster: &clusterv1.Cluster{ ObjectMeta: metav1.ObjectMeta{Name: "capi-test-1"}, Spec: clusterv1.ClusterSpec{ - InfrastructureRef: &corev1.ObjectReference{Name: "aws-test-5"}, + InfrastructureRef: clusterv1.ContractVersionedObjectReference{ + Kind: "AWSCluster", + Name: "aws-cluster-1", + APIGroup: infrav1.GroupVersion.Group, + }, }, }, expectError: false, @@ -2450,12 +2645,28 @@ func TestAWSMachineReconcilerReconcile(t *testing.T) { }, Spec: clusterv1.MachineSpec{ ClusterName: "capi-test", + InfrastructureRef: clusterv1.ContractVersionedObjectReference{ + Kind: "AWSMachine", + Name: "aws-machine-6", + APIGroup: infrav1.GroupVersion.Group, + }, + Bootstrap: clusterv1.Bootstrap{ + ConfigRef: clusterv1.ContractVersionedObjectReference{ + Name: "capi-test-6-config", + Kind: "EKSConfig", + APIGroup: clusterv1.GroupVersion.Group, + }, + }, }, }, ownerCluster: &clusterv1.Cluster{ ObjectMeta: metav1.ObjectMeta{Name: "capi-test-1"}, Spec: clusterv1.ClusterSpec{ - InfrastructureRef: &corev1.ObjectReference{Name: "aws-test-5"}, + InfrastructureRef: clusterv1.ContractVersionedObjectReference{ + Kind: "AWSCluster", + Name: "aws-test-5", + APIGroup: infrav1.GroupVersion.Group, + }, }, }, awsCluster: &infrav1.AWSCluster{ObjectMeta: metav1.ObjectMeta{Name: "aws-test-5"}}, @@ -2541,21 +2752,21 @@ func TestAWSMachineReconcilerReconcileDefaultsToLoadBalancerTypeClassic(t *testi ownerCluster := &clusterv1.Cluster{ ObjectMeta: metav1.ObjectMeta{Name: "capi-test-1", Namespace: ns}, Spec: clusterv1.ClusterSpec{ - InfrastructureRef: &corev1.ObjectReference{ - Kind: "AWSCluster", - Name: "capi-test-1", // assuming same name - Namespace: ns, - APIVersion: infrav1.GroupVersion.String(), + InfrastructureRef: clusterv1.ContractVersionedObjectReference{ + Kind: "AWSCluster", + Name: "capi-test-1", // assuming same name + APIGroup: infrav1.GroupVersion.Group, }, - ControlPlaneRef: &corev1.ObjectReference{ - Kind: "KubeadmControlPlane", - Namespace: cp.Namespace, - Name: cp.Name, - APIVersion: kubeadmv1beta1.GroupVersion.String(), + ControlPlaneRef: clusterv1.ContractVersionedObjectReference{ + Kind: "KubeadmControlPlane", + Name: cp.Name, + APIGroup: kubeadmv1beta1.GroupVersion.Group, }, }, Status: clusterv1.ClusterStatus{ - InfrastructureReady: true, + Initialization: clusterv1.ClusterInitializationStatus{ + InfrastructureProvisioned: ptr.To(true), + }, }, } @@ -2618,6 +2829,11 @@ func 
TestAWSMachineReconcilerReconcileDefaultsToLoadBalancerTypeClassic(t *testi }, Spec: clusterv1.MachineSpec{ ClusterName: "capi-test", + InfrastructureRef: clusterv1.ContractVersionedObjectReference{ + Kind: "AWSMachine", + Name: "aws-machine-6", + APIGroup: infrav1.GroupVersion.Group, + }, Bootstrap: clusterv1.Bootstrap{ DataSecretName: aws.String("bootstrap-data"), }, @@ -2647,7 +2863,7 @@ func TestAWSMachineReconcilerReconcileDefaultsToLoadBalancerTypeClassic(t *testi }, }, Status: infrav1.AWSMachineStatus{ - Conditions: clusterv1.Conditions{ + Conditions: clusterv1beta1.Conditions{ { Type: "Paused", Status: corev1.ConditionFalse, diff --git a/controllers/awsmachinetemplate_controller.go b/controllers/awsmachinetemplate_controller.go new file mode 100644 index 0000000000..ad54b55f29 --- /dev/null +++ b/controllers/awsmachinetemplate_controller.go @@ -0,0 +1,555 @@ +/* +Copyright 2025 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controllers + +import ( + "context" + "fmt" + "strings" + + "github.com/aws/aws-sdk-go-v2/service/ec2" + ec2types "github.com/aws/aws-sdk-go-v2/service/ec2/types" + "github.com/pkg/errors" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/utils/ptr" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/builder" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/predicate" + + infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" + ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2" + "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope" + ec2service "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/ec2" + "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger" + "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/record" + "sigs.k8s.io/cluster-api-provider-aws/v2/util/paused" + controlplanev1 "sigs.k8s.io/cluster-api/api/controlplane/kubeadm/v1beta2" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" + "sigs.k8s.io/cluster-api/util" + "sigs.k8s.io/cluster-api/util/predicates" +) + +const ( + // awsMachineTemplateKind is the Kind name for AWSMachineTemplate resources. + awsMachineTemplateKind = "AWSMachineTemplate" +) + +// AWSMachineTemplateReconciler reconciles AWSMachineTemplate objects. +// +// This controller automatically populates capacity information for AWSMachineTemplate resources +// to enable autoscaling from zero. +// +// See: https://github.com/kubernetes-sigs/cluster-api/blob/main/docs/proposals/20210310-opt-in-autoscaling-from-zero.md +type AWSMachineTemplateReconciler struct { + client.Client + WatchFilterValue string +} + +// SetupWithManager sets up the controller with the Manager. 
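+// Besides the AWSMachineTemplate itself, it watches creation events for MachineDeployments, MachineSets and, when the CRD is installed, KubeadmControlPlanes, since those objects carry the Kubernetes version that the default AMI lookup in getNodeInfo depends on.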
+func (r *AWSMachineTemplateReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager, options controller.Options) error { + log := logger.FromContext(ctx) + + b := ctrl.NewControllerManagedBy(mgr). + For(&infrav1.AWSMachineTemplate{}). + WithOptions(options). + WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(mgr.GetScheme(), log.GetLogger(), r.WatchFilterValue)). + Watches( + &clusterv1.MachineDeployment{}, + handler.EnqueueRequestsFromMapFunc(r.machineDeploymentToAWSMachineTemplate), + // Only emit events for creation to reconcile in case the MachineDeployment got created after the AWSMachineTemplate was reconciled. + builder.WithPredicates(resourceCreatedPredicate), + ). + Watches( + &clusterv1.MachineSet{}, + handler.EnqueueRequestsFromMapFunc(r.machineSetToAWSMachineTemplate), + // Only emit events for creation to reconcile in case the MachineSet got created after the AWSMachineTemplate was reconciled. + builder.WithPredicates(resourceCreatedPredicate), + ) + + // Watch KubeadmControlPlanes only if the KubeadmControlPlane CRD is installed. + if _, err := mgr.GetRESTMapper().RESTMapping(schema.GroupKind{Group: controlplanev1.GroupVersion.Group, Kind: "KubeadmControlPlane"}, controlplanev1.GroupVersion.Version); err == nil { + b = b.Watches(&controlplanev1.KubeadmControlPlane{}, + handler.EnqueueRequestsFromMapFunc(r.kubeadmControlPlaneToAWSMachineTemplate), + // Only emit events for creation to reconcile in case the KubeadmControlPlane got created after the AWSMachineTemplate was reconciled. + builder.WithPredicates(resourceCreatedPredicate), + ) + } + + _, err := b.Build(r) + if err != nil { + return errors.Wrap(err, "failed setting up with a controller manager") + } + + return nil +} + +// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=awsmachinetemplates,verbs=get;list;watch +// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=awsmachinetemplates/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=awsclusters,verbs=get;list;watch +// +kubebuilder:rbac:groups=cluster.x-k8s.io,resources=clusters,verbs=get;list;watch +// +kubebuilder:rbac:groups=cluster.x-k8s.io,resources=machinedeployments;machinesets,verbs=get;list;watch +// +kubebuilder:rbac:groups="",resources=events,verbs=get;list;watch;create;update;patch + +// Reconcile populates capacity information for AWSMachineTemplate.
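+// On success, both status fields are set from the AWS API responses; for a t3.medium the result would look roughly like capacity: {cpu: "2", memory: "4096Mi"} and nodeInfo: {architecture: amd64, operatingSystem: linux} (illustrative values, not taken from a live DescribeInstanceTypes call).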
+func (r *AWSMachineTemplateReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + log := logger.FromContext(ctx) + + // Fetch the AWSMachineTemplate + awsMachineTemplate := &infrav1.AWSMachineTemplate{} + if err := r.Get(ctx, req.NamespacedName, awsMachineTemplate); err != nil { + if apierrors.IsNotFound(err) { + return ctrl.Result{}, nil + } + return ctrl.Result{}, err + } + + // Get instance type from spec + instanceType := awsMachineTemplate.Spec.Template.Spec.InstanceType + if instanceType == "" { + return ctrl.Result{}, nil + } + + // Check if capacity and nodeInfo are already populated + // This avoids unnecessary AWS API calls when the status is already populated + if len(awsMachineTemplate.Status.Capacity) > 0 && + awsMachineTemplate.Status.NodeInfo != nil && awsMachineTemplate.Status.NodeInfo.OperatingSystem != "" && awsMachineTemplate.Status.NodeInfo.Architecture != "" { + return ctrl.Result{}, nil + } + + // Get the owner cluster + cluster, err := util.GetOwnerCluster(ctx, r.Client, awsMachineTemplate.ObjectMeta) + if err != nil { + return ctrl.Result{}, err + } + if cluster == nil { + return ctrl.Result{}, nil + } + + // Check if the resource is paused + if isPaused, conditionChanged, err := paused.EnsurePausedCondition(ctx, r.Client, cluster, awsMachineTemplate); err != nil || isPaused || conditionChanged { + return ctrl.Result{}, err + } + + // Find the region by checking ownerReferences + region, err := r.getRegion(ctx, cluster) + if err != nil { + return ctrl.Result{}, err + } + if region == "" { + return ctrl.Result{}, nil + } + + // Create global scope for this region + // Reference: exp/instancestate/awsinstancestate_controller.go:68-76 + globalScope, err := scope.NewGlobalScope(scope.GlobalScopeParams{ + ControllerName: "awsmachinetemplate", + Region: region, + }) + if err != nil { + record.Warnf(awsMachineTemplate, "AWSSessionFailed", "Failed to create AWS session for region %q: %v", region, err) + return ctrl.Result{}, nil + } + + // Create EC2 client from global scope + ec2Client := ec2.NewFromConfig(globalScope.Session()) + + // Query instance type capacity + capacity, err := r.getInstanceTypeCapacity(ctx, ec2Client, instanceType) + if err != nil { + record.Warnf(awsMachineTemplate, "CapacityQueryFailed", "Failed to query capacity for instance type %q: %v", instanceType, err) + return ctrl.Result{}, nil + } + + // Query node info (architecture and OS) + nodeInfo, err := r.getNodeInfo(ctx, ec2Client, awsMachineTemplate, instanceType) + if err != nil { + record.Warnf(awsMachineTemplate, "NodeInfoQueryFailed", "Failed to query node info for instance type %q: %v", instanceType, err) + return ctrl.Result{}, nil + } + + // Save original before modifying, then update all status fields at once + original := awsMachineTemplate.DeepCopy() + if len(capacity) > 0 { + awsMachineTemplate.Status.Capacity = capacity + } + if nodeInfo != nil && (nodeInfo.Architecture != "" || nodeInfo.OperatingSystem != "") { + awsMachineTemplate.Status.NodeInfo = nodeInfo + } + if err := r.Status().Patch(ctx, awsMachineTemplate, client.MergeFrom(original)); err != nil { + return ctrl.Result{}, errors.Wrap(err, "failed to update AWSMachineTemplate status") + } + + log.Info("Successfully populated capacity and nodeInfo", "instanceType", instanceType, "region", region, "capacity", capacity, "nodeInfo", nodeInfo) + return ctrl.Result{}, nil +} + +// getRegion finds the region by checking the template's owner cluster reference. 
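+// The lookup is ordered: the Cluster's infrastructureRef (AWSCluster, EC2-based clusters) is tried first, then its controlPlaneRef (AWSManagedControlPlane, EKS). An empty region with a nil error means neither reference yielded a region, and the caller skips reconciliation.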
+func (r *AWSMachineTemplateReconciler) getRegion(ctx context.Context, cluster *clusterv1.Cluster) (string, error) { + if cluster == nil { + return "", errors.New("no owner cluster found") + } + + // Get region from AWSCluster (standard EC2-based cluster) + if cluster.Spec.InfrastructureRef.IsDefined() && cluster.Spec.InfrastructureRef.Kind == "AWSCluster" { + awsCluster := &infrav1.AWSCluster{} + if err := r.Get(ctx, client.ObjectKey{ + Namespace: cluster.Namespace, + Name: cluster.Spec.InfrastructureRef.Name, + }, awsCluster); err != nil { + if !apierrors.IsNotFound(err) { + return "", errors.Wrapf(err, "failed to get AWSCluster %s/%s", cluster.Namespace, cluster.Spec.InfrastructureRef.Name) + } + } else if awsCluster.Spec.Region != "" { + return awsCluster.Spec.Region, nil + } + } + + // Get region from AWSManagedControlPlane (EKS cluster) + if cluster.Spec.ControlPlaneRef.IsDefined() && cluster.Spec.ControlPlaneRef.Kind == "AWSManagedControlPlane" { + awsManagedCP := &ekscontrolplanev1.AWSManagedControlPlane{} + if err := r.Get(ctx, client.ObjectKey{ + Namespace: cluster.Namespace, + Name: cluster.Spec.ControlPlaneRef.Name, + }, awsManagedCP); err != nil { + if !apierrors.IsNotFound(err) { + return "", errors.Wrapf(err, "failed to get AWSManagedControlPlane %s/%s", cluster.Namespace, cluster.Spec.ControlPlaneRef.Name) + } + } else if awsManagedCP.Spec.Region != "" { + return awsManagedCP.Spec.Region, nil + } + } + + return "", nil +} + +// getInstanceTypeCapacity queries AWS EC2 API for instance type capacity information. +// Returns the resource list (CPU, Memory). +func (r *AWSMachineTemplateReconciler) getInstanceTypeCapacity(ctx context.Context, ec2Client *ec2.Client, instanceType string) (corev1.ResourceList, error) { + // Query instance type information + input := &ec2.DescribeInstanceTypesInput{ + InstanceTypes: []ec2types.InstanceType{ec2types.InstanceType(instanceType)}, + } + + result, err := ec2Client.DescribeInstanceTypes(ctx, input) + if err != nil { + return nil, errors.Wrapf(err, "failed to describe instance type %q", instanceType) + } + + if len(result.InstanceTypes) == 0 { + return nil, errors.Errorf("no information found for instance type %q", instanceType) + } + + // Extract capacity information + info := result.InstanceTypes[0] + resourceList := corev1.ResourceList{} + + // CPU + if info.VCpuInfo != nil && info.VCpuInfo.DefaultVCpus != nil { + resourceList[corev1.ResourceCPU] = *resource.NewQuantity(int64(*info.VCpuInfo.DefaultVCpus), resource.DecimalSI) + } + + // Memory + if info.MemoryInfo != nil && info.MemoryInfo.SizeInMiB != nil { + resourceList[corev1.ResourceMemory] = resource.MustParse(fmt.Sprintf("%dMi", *info.MemoryInfo.SizeInMiB)) + } + + return resourceList, nil +} + +// getNodeInfo queries node information (architecture and OS) for the AWSMachineTemplate. +// It attempts to resolve nodeInfo using three strategies in order of priority: +// 1. Directly from explicitly specified AMI ID +// 2. From default AMI lookup (requires a Kubernetes version from an owning MachineSet, MachineDeployment, or KubeadmControlPlane) +// 3. From instance type architecture (OS cannot be determined, only architecture) +func (r *AWSMachineTemplateReconciler) getNodeInfo(ctx context.Context, ec2Client *ec2.Client, template *infrav1.AWSMachineTemplate, instanceType string) (*infrav1.NodeInfo, error) { + // Strategy 1: Extract nodeInfo from the AMI if an ID is set.
+ if amiID := ptr.Deref(template.Spec.Template.Spec.AMI.ID, ""); amiID != "" { + result, err := ec2Client.DescribeImages(ctx, &ec2.DescribeImagesInput{ + ImageIds: []string{amiID}, + }) + if err != nil { + return nil, errors.Wrapf(err, "failed to describe AMI %q", amiID) + } + if len(result.Images) == 0 { + return nil, errors.Errorf("no information found for AMI %q", amiID) + } + // Extract nodeInfo directly from the image object (no additional API call needed) + return r.extractNodeInfoFromImage(result.Images[0]), nil + } + + // No explicit AMI ID specified, query instance type to determine architecture + // This architecture will be used to lookup default AMI (Strategy 2) or as fallback (Strategy 3) + result, err := ec2Client.DescribeInstanceTypes(ctx, &ec2.DescribeInstanceTypesInput{ + InstanceTypes: []ec2types.InstanceType{ec2types.InstanceType(instanceType)}, + }) + if err != nil { + return nil, errors.Wrapf(err, "failed to describe instance type %q", instanceType) + } + + if len(result.InstanceTypes) == 0 { + return nil, errors.Errorf("no information found for instance type %q", instanceType) + } + + instanceTypeInfo := result.InstanceTypes[0] + + // Guard the nil check separately so the error message below cannot dereference a nil ProcessorInfo + if instanceTypeInfo.ProcessorInfo == nil { + return nil, errors.Errorf("no processor information found for instance type %q", instanceType) + } + // Instance type must support exactly one architecture + if len(instanceTypeInfo.ProcessorInfo.SupportedArchitectures) != 1 { + return nil, errors.Errorf("instance type must support exactly one architecture, got %d", len(instanceTypeInfo.ProcessorInfo.SupportedArchitectures)) + } + + // Map EC2 architecture type to architecture tag for AMI lookup + var architecture string + var nodeInfoArch infrav1.Architecture + switch instanceTypeInfo.ProcessorInfo.SupportedArchitectures[0] { + case ec2types.ArchitectureTypeX8664: + architecture = ec2service.Amd64ArchitectureTag + nodeInfoArch = infrav1.ArchitectureAmd64 + case ec2types.ArchitectureTypeArm64: + architecture = ec2service.Arm64ArchitectureTag + nodeInfoArch = infrav1.ArchitectureArm64 + default: + return nil, errors.Errorf("unsupported architecture: %v", instanceTypeInfo.ProcessorInfo.SupportedArchitectures[0]) + } + + // Strategy 2: Try to get Kubernetes version and lookup default AMI + kubernetesVersion, err := r.getKubernetesVersion(ctx, template) + if err == nil && kubernetesVersion != "" { + // Attempt AMI lookup with the version + image, err := ec2service.DefaultAMILookup( + ec2Client, + template.Spec.Template.Spec.ImageLookupOrg, + template.Spec.Template.Spec.ImageLookupBaseOS, + kubernetesVersion, + architecture, + template.Spec.Template.Spec.ImageLookupFormat, + ) + if err == nil && image != nil { + // Successfully found AMI, extract accurate nodeInfo from it + return r.extractNodeInfoFromImage(*image), nil + } + // AMI lookup failed, fall through to Strategy 3 + } + + // Strategy 3: Fallback to instance type architecture only + // Note: OS cannot be determined from instance type alone, only architecture + return &infrav1.NodeInfo{ + Architecture: nodeInfoArch, + }, nil +} + +// extractNodeInfoFromImage extracts nodeInfo (architecture and OS) from an EC2 image. +// This is a pure function with no AWS API calls.
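+// For example, a Windows Server AMI typically reports Architecture: x86_64 and Platform: windows, which this function maps to nodeInfo{architecture: amd64, operatingSystem: windows}; the field values here are illustrative of the EC2 API shape rather than taken from a live DescribeImages call.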
+func (r *AWSMachineTemplateReconciler) extractNodeInfoFromImage(image ec2types.Image) *infrav1.NodeInfo { + nodeInfo := &infrav1.NodeInfo{} + + // Extract architecture from AMI + switch image.Architecture { + case ec2types.ArchitectureValuesX8664: + nodeInfo.Architecture = infrav1.ArchitectureAmd64 + case ec2types.ArchitectureValuesArm64: + nodeInfo.Architecture = infrav1.ArchitectureArm64 + } + + // Determine OS - default to Linux, change to Windows if detected + // Most AMIs are Linux-based, so we initialize with Linux as the default + nodeInfo.OperatingSystem = infrav1.OperatingSystemLinux + + // Check Platform field (most reliable for Windows detection) + if image.Platform == ec2types.PlatformValuesWindows { + nodeInfo.OperatingSystem = infrav1.OperatingSystemWindows + return nodeInfo + } + + // Check PlatformDetails field for Windows indication + if image.PlatformDetails != nil { + platformDetails := strings.ToLower(*image.PlatformDetails) + if strings.Contains(platformDetails, string(infrav1.OperatingSystemWindows)) { + nodeInfo.OperatingSystem = infrav1.OperatingSystemWindows + } + } + + return nodeInfo +} + +// getKubernetesVersion attempts to find the Kubernetes version by querying the MachineSets, +// MachineDeployments, and KubeadmControlPlanes that reference this AWSMachineTemplate. +func (r *AWSMachineTemplateReconciler) getKubernetesVersion(ctx context.Context, template *infrav1.AWSMachineTemplate) (string, error) { + listOpts, err := getParentListOptions(template.ObjectMeta) + if err != nil { + return "", errors.Wrap(err, "failed to get parent list options") + } + + // Try to find version from MachineSet first + machineSetList := &clusterv1.MachineSetList{} + if err := r.List(ctx, machineSetList, listOpts...); err != nil { + return "", errors.Wrap(err, "failed to list MachineSets") + } + + // Find MachineSets that reference this AWSMachineTemplate + for _, ms := range machineSetList.Items { + if ms.Spec.Template.Spec.InfrastructureRef.Kind == awsMachineTemplateKind && + ms.Spec.Template.Spec.InfrastructureRef.Name == template.Name && + ms.Spec.Template.Spec.Version != "" { + return ms.Spec.Template.Spec.Version, nil + } + } + + // If not found, try MachineDeployment.
+ machineDeploymentList := &clusterv1.MachineDeploymentList{} + if err := r.List(ctx, machineDeploymentList, listOpts...); err != nil { + return "", errors.Wrap(err, "failed to list MachineDeployments") + } + + // Find MachineDeployments that reference this AWSMachineTemplate + for _, md := range machineDeploymentList.Items { + if md.Spec.Template.Spec.InfrastructureRef.Kind == awsMachineTemplateKind && + md.Spec.Template.Spec.InfrastructureRef.Name == template.Name && + md.Spec.Template.Spec.Version != "" { + return md.Spec.Template.Spec.Version, nil + } + } + + // If not found, try KubeadmControlPlane + kcpList := &controlplanev1.KubeadmControlPlaneList{} + if err := r.List(ctx, kcpList, listOpts...); err != nil { + return "", errors.Wrap(err, "failed to list KubeadmControlPlanes") + } + + // Find KubeadmControlPlanes that reference this AWSMachineTemplate + for _, kcp := range kcpList.Items { + if kcp.Spec.MachineTemplate.Spec.InfrastructureRef.Kind == awsMachineTemplateKind && + kcp.Spec.MachineTemplate.Spec.InfrastructureRef.Name == template.Name && + kcp.Spec.Version != "" { + return kcp.Spec.Version, nil + } + } + + return "", errors.New("no MachineSet, MachineDeployment, or KubeadmControlPlane found referencing this AWSMachineTemplate with a version") +} + +func getParentListOptions(obj metav1.ObjectMeta) ([]client.ListOption, error) { + listOpts := []client.ListOption{ + client.InNamespace(obj.Namespace), + } + + for _, ref := range obj.GetOwnerReferences() { + if ref.Kind != "Cluster" { + continue + } + gv, err := schema.ParseGroupVersion(ref.APIVersion) + if err != nil { + return nil, errors.WithStack(err) + } + if gv.Group == clusterv1.GroupVersion.Group { + listOpts = append(listOpts, client.MatchingLabels{ + clusterv1.ClusterNameLabel: ref.Name, + }) + break + } + } + return listOpts, nil +} + +// kubeadmControlPlaneToAWSMachineTemplate maps KubeadmControlPlane to AWSMachineTemplate reconcile requests. +// This enables the controller to reconcile AWSMachineTemplate when its owner KubeadmControlPlane is created, +// ensuring that nodeInfo can be populated even if the cache hasn't synced yet. +func (r *AWSMachineTemplateReconciler) kubeadmControlPlaneToAWSMachineTemplate(ctx context.Context, o client.Object) []ctrl.Request { + kcp, ok := o.(*controlplanev1.KubeadmControlPlane) + if !ok { + return nil + } + + // Check if it references an AWSMachineTemplate + if kcp.Spec.MachineTemplate.Spec.InfrastructureRef.Kind != awsMachineTemplateKind { + return nil + } + + // Return reconcile request for the referenced AWSMachineTemplate + return []ctrl.Request{ + { + NamespacedName: client.ObjectKey{ + Namespace: kcp.Namespace, + Name: kcp.Spec.MachineTemplate.Spec.InfrastructureRef.Name, + }, + }, + } +} + +// machineDeploymentToAWSMachineTemplate maps MachineDeployment to AWSMachineTemplate reconcile requests. +// This enables the controller to reconcile AWSMachineTemplate when its owner MachineDeployment is created, +// ensuring that nodeInfo can be populated even if the cache hasn't synced yet.
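+// Note that only the Kind of the infrastructure ref is checked, not its API group; a same-named template from another group would be enqueued as well, which is harmless because Reconcile would simply get a NotFound and return.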
+func (r *AWSMachineTemplateReconciler) machineDeploymentToAWSMachineTemplate(ctx context.Context, o client.Object) []ctrl.Request { + md, ok := o.(*clusterv1.MachineDeployment) + if !ok { + return nil + } + + // Check if it references an AWSMachineTemplate + if md.Spec.Template.Spec.InfrastructureRef.Kind != awsMachineTemplateKind { + return nil + } + + // Return reconcile request for the referenced AWSMachineTemplate + return []ctrl.Request{ + { + NamespacedName: client.ObjectKey{ + Namespace: md.Namespace, + Name: md.Spec.Template.Spec.InfrastructureRef.Name, + }, + }, + } +} + +// machineSetToAWSMachineTemplate maps MachineSet to AWSMachineTemplate reconcile requests. +// This enables the controller to reconcile AWSMachineTemplate when its owner MachineSet is created, +// ensuring that nodeInfo can be populated even if the cache hasn't synced yet. +func (r *AWSMachineTemplateReconciler) machineSetToAWSMachineTemplate(ctx context.Context, o client.Object) []ctrl.Request { + ms, ok := o.(*clusterv1.MachineSet) + if !ok { + return nil + } + + // Check if it references an AWSMachineTemplate + if ms.Spec.Template.Spec.InfrastructureRef.Kind != awsMachineTemplateKind { + return nil + } + + // Return reconcile request for the referenced AWSMachineTemplate + return []ctrl.Request{ + { + NamespacedName: client.ObjectKey{ + Namespace: ms.Namespace, + Name: ms.Spec.Template.Spec.InfrastructureRef.Name, + }, + }, + } +} + +var resourceCreatedPredicate = predicate.Funcs{ + CreateFunc: func(e event.CreateEvent) bool { return true }, + UpdateFunc: func(e event.UpdateEvent) bool { return false }, + DeleteFunc: func(e event.DeleteEvent) bool { return false }, + GenericFunc: func(e event.GenericEvent) bool { return true }, +} diff --git a/controllers/awsmachinetemplate_controller_unit_test.go b/controllers/awsmachinetemplate_controller_unit_test.go new file mode 100644 index 0000000000..fb57cb7a37 --- /dev/null +++ b/controllers/awsmachinetemplate_controller_unit_test.go @@ -0,0 +1,413 @@ +/* +Copyright 2025 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controllers + +import ( + "context" + "testing" + + . "github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/utils/ptr" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + + infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" +) + +func TestAWSMachineTemplateReconciler(t *testing.T) { + setupScheme := func() *runtime.Scheme { + scheme := runtime.NewScheme() + _ = infrav1.AddToScheme(scheme) + _ = clusterv1.AddToScheme(scheme) + _ = corev1.AddToScheme(scheme) + return scheme + } + + newFakeClient := func(objs ...client.Object) client.Client { + return fake.NewClientBuilder(). + WithScheme(setupScheme()). + WithObjects(objs...).
+ WithStatusSubresource(&infrav1.AWSMachineTemplate{}). + Build() + } + + newAWSMachineTemplate := func(name string) *infrav1.AWSMachineTemplate { + return &infrav1.AWSMachineTemplate{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: "default", + }, + Spec: infrav1.AWSMachineTemplateSpec{ + Template: infrav1.AWSMachineTemplateResource{ + Spec: infrav1.AWSMachineSpec{ + InstanceType: "t3.medium", + }, + }, + }, + } + } + + t.Run("getRegion", func(t *testing.T) { + t.Run("should get region from AWSCluster", func(t *testing.T) { + g := NewWithT(t) + cluster := &clusterv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cluster", + Namespace: "default", + }, + Spec: clusterv1.ClusterSpec{ + InfrastructureRef: clusterv1.ContractVersionedObjectReference{ + Kind: "AWSCluster", + Name: "test-aws-cluster", + }, + }, + } + awsCluster := &infrav1.AWSCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-aws-cluster", + Namespace: "default", + }, + Spec: infrav1.AWSClusterSpec{ + Region: "us-west-2", + }, + } + + reconciler := &AWSMachineTemplateReconciler{ + Client: newFakeClient(cluster, awsCluster), + } + + region, err := reconciler.getRegion(context.Background(), cluster) + + g.Expect(err).To(BeNil()) + g.Expect(region).To(Equal("us-west-2")) + }) + + t.Run("should return error when cluster is nil", func(t *testing.T) { + g := NewWithT(t) + + reconciler := &AWSMachineTemplateReconciler{ + Client: newFakeClient(), + } + + region, err := reconciler.getRegion(context.Background(), nil) + + g.Expect(err).ToNot(BeNil()) + g.Expect(err.Error()).To(ContainSubstring("no owner cluster found")) + g.Expect(region).To(Equal("")) + }) + + t.Run("should return empty when cluster has no infrastructure ref", func(t *testing.T) { + g := NewWithT(t) + cluster := &clusterv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cluster", + Namespace: "default", + }, + } + + reconciler := &AWSMachineTemplateReconciler{ + Client: newFakeClient(cluster), + } + + region, err := reconciler.getRegion(context.Background(), cluster) + + g.Expect(err).To(BeNil()) + g.Expect(region).To(Equal("")) + }) + + t.Run("should return empty when AWSCluster not found", func(t *testing.T) { + g := NewWithT(t) + cluster := &clusterv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cluster", + Namespace: "default", + }, + Spec: clusterv1.ClusterSpec{ + InfrastructureRef: clusterv1.ContractVersionedObjectReference{ + Kind: "AWSCluster", + Name: "test-aws-cluster", + }, + }, + } + + reconciler := &AWSMachineTemplateReconciler{ + Client: newFakeClient(cluster), + } + + region, err := reconciler.getRegion(context.Background(), cluster) + + g.Expect(err).To(BeNil()) + g.Expect(region).To(Equal("")) + }) + }) + + // Note: getInstanceTypeCapacity and getNodeInfo tests are skipped as they require EC2 client injection + // which would need significant refactoring. Those functions are tested indirectly through + // integration tests.
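+ // One way to make them unit-testable (a sketch, not part of this change) would be for the reconciler to depend on a narrow interface rather than *ec2.Client: + // + // type ec2DescribeAPI interface { + // DescribeInstanceTypes(ctx context.Context, in *ec2.DescribeInstanceTypesInput, optFns ...func(*ec2.Options)) (*ec2.DescribeInstanceTypesOutput, error) + // DescribeImages(ctx context.Context, in *ec2.DescribeImagesInput, optFns ...func(*ec2.Options)) (*ec2.DescribeImagesOutput, error) + // } + // + // *ec2.Client already satisfies this, and tests could then inject a fake returning canned outputs.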
+ + t.Run("Reconcile", func(t *testing.T) { + t.Run("should skip reconcile when capacity and nodeInfo are already populated", func(t *testing.T) { + g := NewWithT(t) + template := newAWSMachineTemplate("test-template") + template.Status.Capacity = corev1.ResourceList{ + corev1.ResourceCPU: *resource.NewQuantity(2, resource.DecimalSI), + corev1.ResourceMemory: resource.MustParse("4Gi"), + } + template.Status.NodeInfo = &infrav1.NodeInfo{ + Architecture: infrav1.ArchitectureAmd64, + OperatingSystem: infrav1.OperatingSystemLinux, + } + + reconciler := &AWSMachineTemplateReconciler{ + Client: newFakeClient(template), + } + + // Should skip reconcile and return early without calling AWS APIs + // No need to set up owner cluster or region since the early return happens before that + result, err := reconciler.Reconcile(context.Background(), ctrl.Request{ + NamespacedName: client.ObjectKeyFromObject(template), + }) + + g.Expect(err).To(BeNil()) + g.Expect(result.RequeueAfter).To(BeZero()) + }) + + t.Run("should reconcile when capacity set but nodeInfo is not", func(t *testing.T) { + g := NewWithT(t) + template := newAWSMachineTemplate("test-template") + template.Status.Capacity = corev1.ResourceList{ + corev1.ResourceCPU: *resource.NewQuantity(2, resource.DecimalSI), + } + template.OwnerReferences = []metav1.OwnerReference{ + { + APIVersion: clusterv1.GroupVersion.String(), + Kind: "Cluster", + Name: "test-cluster", + }, + } + cluster := &clusterv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cluster", + Namespace: "default", + }, + Spec: clusterv1.ClusterSpec{ + InfrastructureRef: clusterv1.ContractVersionedObjectReference{ + Kind: "AWSCluster", + Name: "test-aws-cluster", + }, + }, + } + awsCluster := &infrav1.AWSCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-aws-cluster", + Namespace: "default", + }, + Spec: infrav1.AWSClusterSpec{ + Region: "us-west-2", + }, + } + + reconciler := &AWSMachineTemplateReconciler{ + Client: newFakeClient(template, cluster, awsCluster), + } + + // This will fail at AWS API call, but demonstrates that reconcile proceeds + result, err := reconciler.Reconcile(context.Background(), ctrl.Request{ + NamespacedName: client.ObjectKeyFromObject(template), + }) + + g.Expect(err).To(BeNil()) + g.Expect(result.RequeueAfter).To(BeZero()) + }) + + t.Run("should skip when instance type is empty", func(t *testing.T) { + g := NewWithT(t) + template := newAWSMachineTemplate("test-template") + template.Spec.Template.Spec.InstanceType = "" + + reconciler := &AWSMachineTemplateReconciler{ + Client: newFakeClient(template), + } + + result, err := reconciler.Reconcile(context.Background(), ctrl.Request{ + NamespacedName: client.ObjectKeyFromObject(template), + }) + + g.Expect(err).To(BeNil()) + g.Expect(result.RequeueAfter).To(BeZero()) + }) + + t.Run("should not reconcile when cluster is paused", func(t *testing.T) { + g := NewWithT(t) + template := newAWSMachineTemplate("test-template") + template.OwnerReferences = []metav1.OwnerReference{ + { + APIVersion: clusterv1.GroupVersion.String(), + Kind: "Cluster", + Name: "test-cluster", + }, + } + cluster := &clusterv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cluster", + Namespace: "default", + }, + Spec: clusterv1.ClusterSpec{ + Paused: ptr.To(true), + InfrastructureRef: clusterv1.ContractVersionedObjectReference{ + Kind: "AWSCluster", + Name: "test-aws-cluster", + }, + }, + } + awsCluster := &infrav1.AWSCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-aws-cluster", + Namespace: "default", + }, + 
Spec: infrav1.AWSClusterSpec{ + Region: "us-west-2", + }, + } + + reconciler := &AWSMachineTemplateReconciler{ + Client: newFakeClient(template, cluster, awsCluster), + } + + result, err := reconciler.Reconcile(context.Background(), ctrl.Request{ + NamespacedName: client.ObjectKeyFromObject(template), + }) + + g.Expect(err).To(BeNil()) + g.Expect(result.RequeueAfter).To(BeZero()) + }) + + t.Run("should not reconcile when template has paused annotation", func(t *testing.T) { + g := NewWithT(t) + template := newAWSMachineTemplate("test-template") + template.Annotations = map[string]string{clusterv1.PausedAnnotation: ""} + template.OwnerReferences = []metav1.OwnerReference{ + { + APIVersion: clusterv1.GroupVersion.String(), + Kind: "Cluster", + Name: "test-cluster", + }, + } + cluster := &clusterv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cluster", + Namespace: "default", + }, + Spec: clusterv1.ClusterSpec{ + InfrastructureRef: clusterv1.ContractVersionedObjectReference{ + Kind: "AWSCluster", + Name: "test-aws-cluster", + }, + }, + } + awsCluster := &infrav1.AWSCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-aws-cluster", + Namespace: "default", + }, + Spec: infrav1.AWSClusterSpec{ + Region: "us-west-2", + }, + } + + reconciler := &AWSMachineTemplateReconciler{ + Client: newFakeClient(template, cluster, awsCluster), + } + + result, err := reconciler.Reconcile(context.Background(), ctrl.Request{ + NamespacedName: client.ObjectKeyFromObject(template), + }) + + g.Expect(err).To(BeNil()) + g.Expect(result.RequeueAfter).To(BeZero()) + }) + + t.Run("should skip when no owner cluster", func(t *testing.T) { + g := NewWithT(t) + template := newAWSMachineTemplate("test-template") + + reconciler := &AWSMachineTemplateReconciler{ + Client: newFakeClient(template), + } + + result, err := reconciler.Reconcile(context.Background(), ctrl.Request{ + NamespacedName: client.ObjectKeyFromObject(template), + }) + + g.Expect(err).To(BeNil()) + g.Expect(result.RequeueAfter).To(BeZero()) + }) + + t.Run("should skip when region is empty", func(t *testing.T) { + g := NewWithT(t) + template := newAWSMachineTemplate("test-template") + template.OwnerReferences = []metav1.OwnerReference{ + { + APIVersion: clusterv1.GroupVersion.String(), + Kind: "Cluster", + Name: "test-cluster", + }, + } + cluster := &clusterv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cluster", + Namespace: "default", + }, + } + + reconciler := &AWSMachineTemplateReconciler{ + Client: newFakeClient(template, cluster), + } + + result, err := reconciler.Reconcile(context.Background(), ctrl.Request{ + NamespacedName: client.ObjectKeyFromObject(template), + }) + + g.Expect(err).To(BeNil()) + g.Expect(result.RequeueAfter).To(BeZero()) + }) + + t.Run("should return nil when template not found", func(t *testing.T) { + g := NewWithT(t) + + reconciler := &AWSMachineTemplateReconciler{ + Client: newFakeClient(), + } + + result, err := reconciler.Reconcile(context.Background(), ctrl.Request{ + NamespacedName: client.ObjectKey{ + Namespace: "default", + Name: "nonexistent", + }, + }) + + g.Expect(err).To(BeNil()) + g.Expect(result.RequeueAfter).To(BeZero()) + }) + }) +} diff --git a/controllers/awsmanagedcluster_controller.go b/controllers/awsmanagedcluster_controller.go index 560191634b..b5f0beee88 100644 --- a/controllers/awsmanagedcluster_controller.go +++ b/controllers/awsmanagedcluster_controller.go @@ -36,7 +36,7 @@ import ( ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2" 
"sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger" "sigs.k8s.io/cluster-api-provider-aws/v2/util/paused" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/patch" "sigs.k8s.io/cluster-api/util/predicates" @@ -83,7 +83,7 @@ func (r *AWSManagedClusterReconciler) Reconcile(ctx context.Context, req ctrl.Re controlPlane := &ekscontrolplanev1.AWSManagedControlPlane{} controlPlaneRef := types.NamespacedName{ Name: cluster.Spec.ControlPlaneRef.Name, - Namespace: cluster.Spec.ControlPlaneRef.Namespace, + Namespace: cluster.Namespace, } if err := r.Get(ctx, controlPlaneRef, controlPlane); err != nil { @@ -181,8 +181,8 @@ func (r *AWSManagedClusterReconciler) managedControlPlaneToManagedCluster(_ cont } managedClusterRef := cluster.Spec.InfrastructureRef - if managedClusterRef == nil || managedClusterRef.Kind != "AWSManagedCluster" { - log.Info("InfrastructureRef is nil or not AWSManagedCluster, skipping mapping") + if !managedClusterRef.IsDefined() || managedClusterRef.Kind != "AWSManagedCluster" { + log.Info("InfrastructureRef is not defined or not AWSManagedCluster, skipping mapping") return nil } @@ -190,7 +190,7 @@ func (r *AWSManagedClusterReconciler) managedControlPlaneToManagedCluster(_ cont { NamespacedName: types.NamespacedName{ Name: managedClusterRef.Name, - Namespace: managedClusterRef.Namespace, + Namespace: cluster.Namespace, }, }, } diff --git a/controllers/helpers_test.go b/controllers/helpers_test.go index 05f103cfb6..d964eee194 100644 --- a/controllers/helpers_test.go +++ b/controllers/helpers_test.go @@ -36,8 +36,8 @@ import ( elbService "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/elb" "sigs.k8s.io/cluster-api-provider-aws/v2/test/helpers" "sigs.k8s.io/cluster-api-provider-aws/v2/test/mocks" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - "sigs.k8s.io/cluster-api/util/conditions" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" + v1beta1conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" ) const DNSName = "www.google.com" @@ -127,7 +127,7 @@ var ( func expectAWSClusterConditions(g *WithT, m *infrav1.AWSCluster, expected []conditionAssertion) { g.Expect(len(m.Status.Conditions)).To(BeNumerically(">=", len(expected)), "number of conditions") for _, c := range expected { - actual := conditions.Get(m, c.conditionType) + actual := v1beta1conditions.Get(m, c.conditionType) g.Expect(actual).To(Not(BeNil())) g.Expect(actual.Type).To(Equal(c.conditionType)) g.Expect(actual.Status).To(Equal(c.status)) diff --git a/controllers/rosacluster_controller.go b/controllers/rosacluster_controller.go index 8d228ba0f1..219e0d9202 100644 --- a/controllers/rosacluster_controller.go +++ b/controllers/rosacluster_controller.go @@ -22,7 +22,6 @@ import ( cmv1 "github.com/openshift-online/ocm-sdk-go/clustersmgmt/v1" "github.com/pkg/errors" - corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" @@ -48,8 +47,7 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/rosa" "sigs.k8s.io/cluster-api-provider-aws/v2/util/paused" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - expclusterv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/patch" "sigs.k8s.io/cluster-api/util/predicates" @@ 
-111,7 +109,7 @@ func (r *ROSAClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request) controlPlane := &rosacontrolplanev1.ROSAControlPlane{} controlPlaneRef := types.NamespacedName{ Name: cluster.Spec.ControlPlaneRef.Name, - Namespace: cluster.Spec.ControlPlaneRef.Namespace, + Namespace: cluster.Namespace, } if err := r.Get(ctx, controlPlaneRef, controlPlane); err != nil { @@ -222,8 +220,8 @@ func (r *ROSAClusterReconciler) rosaControlPlaneToManagedCluster(log *logger.Log } rosaClusterRef := cluster.Spec.InfrastructureRef - if rosaClusterRef == nil || rosaClusterRef.Kind != "ROSACluster" { - log.Info("InfrastructureRef is nil or not ROSACluster, skipping mapping") + if !rosaClusterRef.IsDefined() || rosaClusterRef.Kind != "ROSACluster" { + log.Info("InfrastructureRef is not defined or not ROSACluster, skipping mapping") return nil } @@ -231,7 +229,7 @@ func (r *ROSAClusterReconciler) rosaControlPlaneToManagedCluster(log *logger.Log { NamespacedName: types.NamespacedName{ Name: rosaClusterRef.Name, - Namespace: rosaClusterRef.Namespace, + Namespace: cluster.Namespace, }, }, } @@ -262,7 +260,7 @@ func (r *ROSAClusterReconciler) getRosaMachinePoolNames(ctx context.Context, clu } // buildROSAMachinePool returns a ROSAMachinePool and its corresponding MachinePool. -func (r *ROSAClusterReconciler) buildROSAMachinePool(nodePoolName string, clusterName string, namespace string, nodePool *cmv1.NodePool) (*expinfrav1.ROSAMachinePool, *expclusterv1.MachinePool) { +func (r *ROSAClusterReconciler) buildROSAMachinePool(nodePoolName string, clusterName string, namespace string, nodePool *cmv1.NodePool) (*expinfrav1.ROSAMachinePool, *clusterv1.MachinePool) { rosaMPSpec := utils.NodePoolToRosaMachinePoolSpec(nodePool) rosaMachinePool := &expinfrav1.ROSAMachinePool{ TypeMeta: metav1.TypeMeta{ @@ -278,9 +276,9 @@ func (r *ROSAClusterReconciler) buildROSAMachinePool(nodePoolName string, cluste }, Spec: rosaMPSpec, } - machinePool := &expclusterv1.MachinePool{ + machinePool := &clusterv1.MachinePool{ TypeMeta: metav1.TypeMeta{ - APIVersion: expclusterv1.GroupVersion.String(), + APIVersion: clusterv1.GroupVersion.String(), Kind: "MachinePool", }, ObjectMeta: metav1.ObjectMeta{ @@ -290,7 +288,7 @@ func (r *ROSAClusterReconciler) buildROSAMachinePool(nodePoolName string, cluste clusterv1.ClusterNameLabel: clusterName, }, }, - Spec: expclusterv1.MachinePoolSpec{ + Spec: clusterv1.MachinePoolSpec{ ClusterName: clusterName, Replicas: ptr.To(int32(1)), Template: clusterv1.MachineTemplateSpec{ @@ -299,10 +297,10 @@ func (r *ROSAClusterReconciler) buildROSAMachinePool(nodePoolName string, cluste Bootstrap: clusterv1.Bootstrap{ DataSecretName: ptr.To(string("")), }, - InfrastructureRef: corev1.ObjectReference{ - APIVersion: expinfrav1.GroupVersion.String(), - Kind: "ROSAMachinePool", - Name: rosaMachinePool.Name, + InfrastructureRef: clusterv1.ContractVersionedObjectReference{ + APIGroup: expinfrav1.GroupVersion.Group, + Kind: "ROSAMachinePool", + Name: rosaMachinePool.Name, }, }, }, diff --git a/controllers/rosacluster_controller_test.go b/controllers/rosacluster_controller_test.go index 0a7dd42c0b..c91b5510f3 100644 --- a/controllers/rosacluster_controller_test.go +++ b/controllers/rosacluster_controller_test.go @@ -30,6 +30,7 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/tools/record" + "k8s.io/utils/ptr" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" @@ -43,8 +44,8 @@ import ( 
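The awsmanagedcluster and rosacluster controller hunks above apply the same v1beta2 migration recipe, which the test changes below then mirror: Cluster.Spec.InfrastructureRef and Cluster.Spec.ControlPlaneRef change from *corev1.ObjectReference to the value type clusterv1.ContractVersionedObjectReference, which carries an APIGroup instead of an APIVersion and has no Namespace field at all, since referenced objects are defined to live in the Cluster's own namespace. Nil checks therefore become IsDefined() checks, and lookups take their namespace from the Cluster. Distilled (a condensation of the hunks above, not a verbatim excerpt):

ref := cluster.Spec.InfrastructureRef
if !ref.IsDefined() || ref.Kind != "ROSACluster" {
	return nil // not a reference this controller should map
}
key := types.NamespacedName{Name: ref.Name, Namespace: cluster.Namespace}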
"sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/rosa" "sigs.k8s.io/cluster-api-provider-aws/v2/test/mocks" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - expclusterv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/cluster-api/util/patch" ) @@ -95,10 +96,19 @@ func TestRosaClusterReconcile(t *testing.T) { PodCIDR: "10.128.0.0/14", ServiceCIDR: "172.30.0.0/16", }, - Region: "us-east-1", - Version: "4.19.20", - ChannelGroup: "stable", - RolesRef: rosacontrolplanev1.AWSRolesRef{}, + Region: "us-east-1", + Version: "4.19.20", + ChannelGroup: "stable", + RolesRef: rosacontrolplanev1.AWSRolesRef{ + IngressARN: "ingress-arn", + ImageRegistryARN: "image-arn", + StorageARN: "storage-arn", + NetworkARN: "net-arn", + KubeCloudControllerARN: "kube-arn", + NodePoolManagementARN: "node-arn", + ControlPlaneOperatorARN: "control-arn", + KMSProviderARN: "kms-arn", + }, OIDCID: "oidcid1", InstallerRoleARN: "arn1", WorkerRoleARN: "arn2", @@ -136,19 +146,17 @@ func TestRosaClusterReconcile(t *testing.T) { UID: types.UID("capi-cluster-1"), }, Spec: clusterv1.ClusterSpec{ - InfrastructureRef: &corev1.ObjectReference{ - Name: rosaCluster.Name, - Kind: "ROSACluster", - APIVersion: expinfrav1.GroupVersion.String(), - Namespace: ns.Name, + InfrastructureRef: clusterv1.ContractVersionedObjectReference{ + Name: rosaCluster.Name, + Kind: "ROSACluster", + APIGroup: expinfrav1.GroupVersion.Group, }, - ControlPlaneRef: &corev1.ObjectReference{ - Name: rosaControlPlane.Name, - Kind: "ROSAControlPlane", - APIVersion: rosacontrolplanev1.GroupVersion.String(), - Namespace: ns.Name, + ControlPlaneRef: clusterv1.ContractVersionedObjectReference{ + Name: rosaControlPlane.Name, + Kind: "ROSAControlPlane", + APIGroup: rosacontrolplanev1.GroupVersion.Group, }, - Paused: false, + Paused: ptr.To(false), }, } @@ -177,12 +185,13 @@ func TestRosaClusterReconcile(t *testing.T) { // set rosaCluster pause conditions rosaClsPatch, err := patch.NewHelper(rosaCluster, testEnv) - rosaCluster.Status.Conditions = clusterv1.Conditions{ - clusterv1.Condition{ - Type: clusterv1.PausedV1Beta2Condition, - Status: corev1.ConditionFalse, - Reason: clusterv1.NotPausedV1Beta2Reason, - Message: "", + rosaCluster.Status.Conditions = clusterv1beta1.Conditions{ + clusterv1beta1.Condition{ + Type: clusterv1beta1.PausedV1Beta2Condition, + Status: corev1.ConditionFalse, + Reason: clusterv1beta1.NotPausedV1Beta2Reason, + Message: "", + LastTransitionTime: metav1.NewTime(time.Now()), }, } g.Expect(rosaClsPatch.Patch(ctx, rosaCluster)).To(Succeed()) @@ -190,12 +199,17 @@ func TestRosaClusterReconcile(t *testing.T) { // set capiCluster pause condition clsPatch, err := patch.NewHelper(capiCluster, testEnv) - capiCluster.Status.Conditions = clusterv1.Conditions{ - clusterv1.Condition{ - Type: clusterv1.PausedV1Beta2Condition, - Status: corev1.ConditionFalse, - Reason: clusterv1.NotPausedV1Beta2Reason, - Message: "", + capiCluster.Status.Deprecated = &clusterv1.ClusterDeprecatedStatus{ + V1Beta1: &clusterv1.ClusterV1Beta1DeprecatedStatus{ + Conditions: clusterv1.Conditions{ + clusterv1.Condition{ + Type: clusterv1beta1.PausedV1Beta2Condition, + Status: corev1.ConditionFalse, + Reason: clusterv1beta1.NotPausedV1Beta2Reason, + Message: "", + LastTransitionTime: metav1.NewTime(time.Now()), + }, + }, }, } g.Expect(clsPatch.Patch(ctx, capiCluster)).To(Succeed()) @@ -282,7 +296,7 @@ func 
TestRosaClusterReconcile(t *testing.T) { errRosaMP := testEnv.Get(ctx, keyRosaMP, rosaMachinePool) g.Expect(errRosaMP).ToNot(HaveOccurred()) - machinePool := &expclusterv1.MachinePool{} + machinePool := &clusterv1.MachinePool{} keyMP := client.ObjectKey{Name: nodePoolName, Namespace: ns.Name} errMP := testEnv.Get(ctx, keyMP, machinePool) g.Expect(errMP).ToNot(HaveOccurred()) diff --git a/controllers/suite_test.go b/controllers/suite_test.go index 4ee71f9d07..59891ea662 100644 --- a/controllers/suite_test.go +++ b/controllers/suite_test.go @@ -31,9 +31,8 @@ import ( rosacontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/rosa/api/v1beta2" expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/test/helpers" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - kubeadmv1beta1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1" - expclusterv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" + kubeadmv1beta1 "sigs.k8s.io/cluster-api/api/controlplane/kubeadm/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" ) var ( @@ -54,7 +53,7 @@ func setup() { utilruntime.Must(rosacontrolplanev1.AddToScheme(scheme.Scheme)) utilruntime.Must(expinfrav1.AddToScheme(scheme.Scheme)) utilruntime.Must(corev1.AddToScheme(scheme.Scheme)) - utilruntime.Must(expclusterv1.AddToScheme(scheme.Scheme)) + utilruntime.Must(clusterv1.AddToScheme(scheme.Scheme)) testEnvConfig := helpers.NewTestEnvironmentConfiguration([]string{ path.Join("config", "crd", "bases"), diff --git a/controlplane/eks/api/v1beta1/awsmanagedcontrolplane_types.go b/controlplane/eks/api/v1beta1/awsmanagedcontrolplane_types.go index a965bef381..607ad2f4de 100644 --- a/controlplane/eks/api/v1beta1/awsmanagedcontrolplane_types.go +++ b/controlplane/eks/api/v1beta1/awsmanagedcontrolplane_types.go @@ -21,7 +21,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) const ( @@ -110,7 +110,7 @@ type AWSManagedControlPlaneSpec struct { //nolint: maligned // ControlPlaneEndpoint represents the endpoint used to communicate with the control plane. // +optional - ControlPlaneEndpoint clusterv1.APIEndpoint `json:"controlPlaneEndpoint"` + ControlPlaneEndpoint clusterv1beta1.APIEndpoint `json:"controlPlaneEndpoint"` // ImageLookupFormat is the AMI naming format to look up machine images when // a machine does not specify an AMI. 
When set, this will be used for all @@ -244,7 +244,7 @@ type AWSManagedControlPlaneStatus struct { Network infrav1.NetworkStatus `json:"networkStatus,omitempty"` // FailureDomains specifies a list fo available availability zones that can be used // +optional - FailureDomains clusterv1.FailureDomains `json:"failureDomains,omitempty"` + FailureDomains clusterv1beta1.FailureDomains `json:"failureDomains,omitempty"` // Bastion holds details of the instance that is used as a bastion jump box // +optional Bastion *infrav1.Instance `json:"bastion,omitempty"` @@ -268,7 +268,7 @@ type AWSManagedControlPlaneStatus struct { // +optional FailureMessage *string `json:"failureMessage,omitempty"` // Conditions specifies the cpnditions for the managed control plane - Conditions clusterv1.Conditions `json:"conditions,omitempty"` + Conditions clusterv1beta1.Conditions `json:"conditions,omitempty"` // Addons holds the current status of the EKS addons // +optional Addons []AddonState `json:"addons,omitempty"` @@ -308,12 +308,12 @@ type AWSManagedControlPlaneList struct { } // GetConditions returns the control planes conditions. -func (r *AWSManagedControlPlane) GetConditions() clusterv1.Conditions { +func (r *AWSManagedControlPlane) GetConditions() clusterv1beta1.Conditions { return r.Status.Conditions } // SetConditions sets the status conditions for the AWSManagedControlPlane. -func (r *AWSManagedControlPlane) SetConditions(conditions clusterv1.Conditions) { +func (r *AWSManagedControlPlane) SetConditions(conditions clusterv1beta1.Conditions) { r.Status.Conditions = conditions } diff --git a/controlplane/eks/api/v1beta1/conditions_consts.go b/controlplane/eks/api/v1beta1/conditions_consts.go index 04b7452b19..971a778e33 100644 --- a/controlplane/eks/api/v1beta1/conditions_consts.go +++ b/controlplane/eks/api/v1beta1/conditions_consts.go @@ -16,45 +16,45 @@ limitations under the License. package v1beta1 -import clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" +import clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" const ( // EKSControlPlaneReadyCondition condition reports on the successful reconciliation of eks control plane. - EKSControlPlaneReadyCondition clusterv1.ConditionType = "EKSControlPlaneReady" + EKSControlPlaneReadyCondition clusterv1beta1.ConditionType = "EKSControlPlaneReady" // EKSControlPlaneCreatingCondition condition reports on whether the eks // control plane is creating. - EKSControlPlaneCreatingCondition clusterv1.ConditionType = "EKSControlPlaneCreating" + EKSControlPlaneCreatingCondition clusterv1beta1.ConditionType = "EKSControlPlaneCreating" // EKSControlPlaneUpdatingCondition condition reports on whether the eks // control plane is updating. - EKSControlPlaneUpdatingCondition clusterv1.ConditionType = "EKSControlPlaneUpdating" + EKSControlPlaneUpdatingCondition clusterv1beta1.ConditionType = "EKSControlPlaneUpdating" // EKSControlPlaneReconciliationFailedReason used to report failures while reconciling EKS control plane. EKSControlPlaneReconciliationFailedReason = "EKSControlPlaneReconciliationFailed" ) const ( // IAMControlPlaneRolesReadyCondition condition reports on the successful reconciliation of eks control plane iam roles. - IAMControlPlaneRolesReadyCondition clusterv1.ConditionType = "IAMControlPlaneRolesReady" + IAMControlPlaneRolesReadyCondition clusterv1beta1.ConditionType = "IAMControlPlaneRolesReady" // IAMControlPlaneRolesReconciliationFailedReason used to report failures while reconciling EKS control plane iam roles. 
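As the remainder of this consts file below shows, only the import alias changes here; the condition taxonomy itself is untouched. Keeping these constants typed as clusterv1beta1.ConditionType is what lets them keep flowing into the deprecated condition helpers used elsewhere in this change, for example:

// caller side, as in the EKS control plane controller later in this diff
v1beta1conditions.MarkTrue(awsManagedControlPlane, ekscontrolplanev1.IAMAuthenticatorConfiguredCondition)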
IAMControlPlaneRolesReconciliationFailedReason = "IAMControlPlaneRolesReconciliationFailed" ) const ( // IAMAuthenticatorConfiguredCondition condition reports on the successful reconciliation of aws-iam-authenticator config. - IAMAuthenticatorConfiguredCondition clusterv1.ConditionType = "IAMAuthenticatorConfigured" + IAMAuthenticatorConfiguredCondition clusterv1beta1.ConditionType = "IAMAuthenticatorConfigured" // IAMAuthenticatorConfigurationFailedReason used to report failures while reconciling the aws-iam-authenticator config. IAMAuthenticatorConfigurationFailedReason = "IAMAuthenticatorConfigurationFailed" ) const ( // EKSAddonsConfiguredCondition condition reports on the successful reconciliation of EKS addons. - EKSAddonsConfiguredCondition clusterv1.ConditionType = "EKSAddonsConfigured" + EKSAddonsConfiguredCondition clusterv1beta1.ConditionType = "EKSAddonsConfigured" // EKSAddonsConfiguredFailedReason used to report failures while reconciling the EKS addons. EKSAddonsConfiguredFailedReason = "EKSAddonsConfiguredFailed" ) const ( // EKSIdentityProviderConfiguredCondition condition reports on the successful association of identity provider config. - EKSIdentityProviderConfiguredCondition clusterv1.ConditionType = "EKSIdentityProviderConfigured" + EKSIdentityProviderConfiguredCondition clusterv1beta1.ConditionType = "EKSIdentityProviderConfigured" // EKSIdentityProviderConfiguredFailedReason used to report failures while reconciling the identity provider config association. EKSIdentityProviderConfiguredFailedReason = "EKSIdentityProviderConfiguredFailed" ) diff --git a/controlplane/eks/api/v1beta1/conversion.go b/controlplane/eks/api/v1beta1/conversion.go index 4039b113d4..c8663e27ec 100644 --- a/controlplane/eks/api/v1beta1/conversion.go +++ b/controlplane/eks/api/v1beta1/conversion.go @@ -20,8 +20,6 @@ import ( apiconversion "k8s.io/apimachinery/pkg/conversion" "sigs.k8s.io/controller-runtime/pkg/conversion" - infrav1beta1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta1" - infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2" utilconversion "sigs.k8s.io/cluster-api/util/conversion" ) @@ -117,10 +115,12 @@ func (r *AWSManagedControlPlane) ConvertTo(dstRaw conversion.Hub) error { dst.Spec.Partition = restored.Spec.Partition dst.Spec.RestrictPrivateSubnets = restored.Spec.RestrictPrivateSubnets + dst.Spec.AccessConfig = restored.Spec.AccessConfig dst.Spec.RolePath = restored.Spec.RolePath dst.Spec.RolePermissionsBoundary = restored.Spec.RolePermissionsBoundary dst.Status.Version = restored.Status.Version dst.Spec.BootstrapSelfManagedAddons = restored.Spec.BootstrapSelfManagedAddons + dst.Spec.UpgradePolicy = restored.Spec.UpgradePolicy return nil } @@ -228,36 +228,6 @@ func (r *AWSManagedControlPlaneList) ConvertFrom(srcRaw conversion.Hub) error { return Convert_v1beta2_AWSManagedControlPlaneList_To_v1beta1_AWSManagedControlPlaneList(src, r, nil) } -// Convert_v1beta1_NetworkSpec_To_v1beta2_NetworkSpec is a conversion function. -func Convert_v1beta1_NetworkSpec_To_v1beta2_NetworkSpec(in *infrav1beta1.NetworkSpec, out *infrav1.NetworkSpec, s apiconversion.Scope) error { - return infrav1beta1.Convert_v1beta1_NetworkSpec_To_v1beta2_NetworkSpec(in, out, s) -} - -// Convert_v1beta2_NetworkSpec_To_v1beta1_NetworkSpec is a generated conversion function. 
-func Convert_v1beta2_NetworkSpec_To_v1beta1_NetworkSpec(in *infrav1.NetworkSpec, out *infrav1beta1.NetworkSpec, s apiconversion.Scope) error { - return infrav1beta1.Convert_v1beta2_NetworkSpec_To_v1beta1_NetworkSpec(in, out, s) -} - -// Convert_v1beta1_NetworkStatus_To_v1beta2_NetworkStatus is a conversion function. -func Convert_v1beta1_NetworkStatus_To_v1beta2_NetworkStatus(in *infrav1beta1.NetworkStatus, out *infrav1.NetworkStatus, s apiconversion.Scope) error { - return infrav1beta1.Convert_v1beta1_NetworkStatus_To_v1beta2_NetworkStatus(in, out, s) -} - -// Convert_v1beta2_NetworkStatus_To_v1beta1_NetworkStatus is a conversion function. -func Convert_v1beta2_NetworkStatus_To_v1beta1_NetworkStatus(in *infrav1.NetworkStatus, out *infrav1beta1.NetworkStatus, s apiconversion.Scope) error { - return infrav1beta1.Convert_v1beta2_NetworkStatus_To_v1beta1_NetworkStatus(in, out, s) -} - -// Convert_v1beta1_Bastion_To_v1beta2_Bastion is a generated conversion function. -func Convert_v1beta1_Bastion_To_v1beta2_Bastion(in *infrav1beta1.Bastion, out *infrav1.Bastion, s apiconversion.Scope) error { - return infrav1beta1.Convert_v1beta1_Bastion_To_v1beta2_Bastion(in, out, s) -} - -// Convert_v1beta2_Bastion_To_v1beta1_Bastion is a generated conversion function. -func Convert_v1beta2_Bastion_To_v1beta1_Bastion(in *infrav1.Bastion, out *infrav1beta1.Bastion, s apiconversion.Scope) error { - return infrav1beta1.Convert_v1beta2_Bastion_To_v1beta1_Bastion(in, out, s) -} - func Convert_v1beta1_AWSManagedControlPlaneSpec_To_v1beta2_AWSManagedControlPlaneSpec(in *AWSManagedControlPlaneSpec, out *ekscontrolplanev1.AWSManagedControlPlaneSpec, s apiconversion.Scope) error { return autoConvert_v1beta1_AWSManagedControlPlaneSpec_To_v1beta2_AWSManagedControlPlaneSpec(in, out, s) } diff --git a/controlplane/eks/api/v1beta1/conversion_test.go b/controlplane/eks/api/v1beta1/conversion_test.go index 32b32fc3e0..ca9f2503b2 100644 --- a/controlplane/eks/api/v1beta1/conversion_test.go +++ b/controlplane/eks/api/v1beta1/conversion_test.go @@ -19,11 +19,11 @@ package v1beta1 import ( "testing" - fuzz "github.com/google/gofuzz" . 
"github.com/onsi/gomega" "k8s.io/apimachinery/pkg/api/apitesting/fuzzer" "k8s.io/apimachinery/pkg/runtime" runtimeserializer "k8s.io/apimachinery/pkg/runtime/serializer" + "sigs.k8s.io/randfill" "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2" utilconversion "sigs.k8s.io/cluster-api/util/conversion" @@ -35,8 +35,8 @@ func fuzzFuncs(_ runtimeserializer.CodecFactory) []interface{} { } } -func AWSManagedControlPlaneFuzzer(obj *AWSManagedControlPlane, c fuzz.Continue) { - c.FuzzNoCustom(obj) +func AWSManagedControlPlaneFuzzer(obj *AWSManagedControlPlane, c randfill.Continue) { + c.FillNoCustom(obj) obj.Spec.DisableVPCCNI = false } diff --git a/controlplane/eks/api/v1beta1/zz_generated.conversion.go b/controlplane/eks/api/v1beta1/zz_generated.conversion.go index 9fe8517b2f..6e71a2c7ee 100644 --- a/controlplane/eks/api/v1beta1/zz_generated.conversion.go +++ b/controlplane/eks/api/v1beta1/zz_generated.conversion.go @@ -29,7 +29,7 @@ import ( runtime "k8s.io/apimachinery/pkg/runtime" apiv1beta2 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" v1beta2 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2" - apiv1beta1 "sigs.k8s.io/cluster-api/api/v1beta1" + corev1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) func init() { @@ -59,26 +59,11 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*AWSManagedControlPlaneSpec)(nil), (*v1beta2.AWSManagedControlPlaneSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_AWSManagedControlPlaneSpec_To_v1beta2_AWSManagedControlPlaneSpec(a.(*AWSManagedControlPlaneSpec), b.(*v1beta2.AWSManagedControlPlaneSpec), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1beta2.AWSManagedControlPlaneSpec)(nil), (*AWSManagedControlPlaneSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_AWSManagedControlPlaneSpec_To_v1beta1_AWSManagedControlPlaneSpec(a.(*v1beta2.AWSManagedControlPlaneSpec), b.(*AWSManagedControlPlaneSpec), scope) - }); err != nil { - return err - } if err := s.AddGeneratedConversionFunc((*AWSManagedControlPlaneStatus)(nil), (*v1beta2.AWSManagedControlPlaneStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1beta1_AWSManagedControlPlaneStatus_To_v1beta2_AWSManagedControlPlaneStatus(a.(*AWSManagedControlPlaneStatus), b.(*v1beta2.AWSManagedControlPlaneStatus), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta2.AWSManagedControlPlaneStatus)(nil), (*AWSManagedControlPlaneStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_AWSManagedControlPlaneStatus_To_v1beta1_AWSManagedControlPlaneStatus(a.(*v1beta2.AWSManagedControlPlaneStatus), b.(*AWSManagedControlPlaneStatus), scope) - }); err != nil { - return err - } if err := s.AddGeneratedConversionFunc((*Addon)(nil), (*v1beta2.Addon)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1beta1_Addon_To_v1beta2_Addon(a.(*Addon), b.(*v1beta2.Addon), scope) }); err != nil { @@ -224,7 +209,22 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta2.VpcCni)(nil), (*VpcCni)(nil), func(a, b interface{}, scope conversion.Scope) error { + if err := s.AddConversionFunc((*AWSManagedControlPlaneSpec)(nil), (*v1beta2.AWSManagedControlPlaneSpec)(nil), func(a, b interface{}, scope 
conversion.Scope) error { + return Convert_v1beta1_AWSManagedControlPlaneSpec_To_v1beta2_AWSManagedControlPlaneSpec(a.(*AWSManagedControlPlaneSpec), b.(*v1beta2.AWSManagedControlPlaneSpec), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*v1beta2.AWSManagedControlPlaneSpec)(nil), (*AWSManagedControlPlaneSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_AWSManagedControlPlaneSpec_To_v1beta1_AWSManagedControlPlaneSpec(a.(*v1beta2.AWSManagedControlPlaneSpec), b.(*AWSManagedControlPlaneSpec), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*v1beta2.AWSManagedControlPlaneStatus)(nil), (*AWSManagedControlPlaneStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_AWSManagedControlPlaneStatus_To_v1beta1_AWSManagedControlPlaneStatus(a.(*v1beta2.AWSManagedControlPlaneStatus), b.(*AWSManagedControlPlaneStatus), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*v1beta2.VpcCni)(nil), (*VpcCni)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1beta2_VpcCni_To_v1beta1_VpcCni(a.(*v1beta2.VpcCni), b.(*VpcCni), scope) }); err != nil { return err @@ -233,7 +233,6 @@ func RegisterConversions(s *runtime.Scheme) error { } func autoConvert_v1beta1_AWSManagedControlPlane_To_v1beta2_AWSManagedControlPlane(in *AWSManagedControlPlane, out *v1beta2.AWSManagedControlPlane, s conversion.Scope) error { - out.TypeMeta = in.TypeMeta out.ObjectMeta = in.ObjectMeta if err := Convert_v1beta1_AWSManagedControlPlaneSpec_To_v1beta2_AWSManagedControlPlaneSpec(&in.Spec, &out.Spec, s); err != nil { return err @@ -250,7 +249,6 @@ func Convert_v1beta1_AWSManagedControlPlane_To_v1beta2_AWSManagedControlPlane(in } func autoConvert_v1beta2_AWSManagedControlPlane_To_v1beta1_AWSManagedControlPlane(in *v1beta2.AWSManagedControlPlane, out *AWSManagedControlPlane, s conversion.Scope) error { - out.TypeMeta = in.TypeMeta out.ObjectMeta = in.ObjectMeta if err := Convert_v1beta2_AWSManagedControlPlaneSpec_To_v1beta1_AWSManagedControlPlaneSpec(&in.Spec, &out.Spec, s); err != nil { return err @@ -267,7 +265,6 @@ func Convert_v1beta2_AWSManagedControlPlane_To_v1beta1_AWSManagedControlPlane(in } func autoConvert_v1beta1_AWSManagedControlPlaneList_To_v1beta2_AWSManagedControlPlaneList(in *AWSManagedControlPlaneList, out *v1beta2.AWSManagedControlPlaneList, s conversion.Scope) error { - out.TypeMeta = in.TypeMeta out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items @@ -289,7 +286,6 @@ func Convert_v1beta1_AWSManagedControlPlaneList_To_v1beta2_AWSManagedControlPlan } func autoConvert_v1beta2_AWSManagedControlPlaneList_To_v1beta1_AWSManagedControlPlaneList(in *v1beta2.AWSManagedControlPlaneList, out *AWSManagedControlPlaneList, s conversion.Scope) error { - out.TypeMeta = in.TypeMeta out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items @@ -375,6 +371,7 @@ func autoConvert_v1beta2_AWSManagedControlPlaneSpec_To_v1beta1_AWSManagedControl out.AssociateOIDCProvider = in.AssociateOIDCProvider out.Addons = (*[]Addon)(unsafe.Pointer(in.Addons)) out.OIDCIdentityProviderConfig = (*OIDCIdentityProviderConfig)(unsafe.Pointer(in.OIDCIdentityProviderConfig)) + // WARNING: in.AccessConfig requires manual conversion: does not exist in peer-type if err := Convert_v1beta2_VpcCni_To_v1beta1_VpcCni(&in.VpcCni, &out.VpcCni, s); err != nil { return err } @@ -383,12 +380,13 @@ func 
autoConvert_v1beta2_AWSManagedControlPlaneSpec_To_v1beta1_AWSManagedControl if err := Convert_v1beta2_KubeProxy_To_v1beta1_KubeProxy(&in.KubeProxy, &out.KubeProxy, s); err != nil { return err } + // WARNING: in.UpgradePolicy requires manual conversion: does not exist in peer-type return nil } func autoConvert_v1beta1_AWSManagedControlPlaneStatus_To_v1beta2_AWSManagedControlPlaneStatus(in *AWSManagedControlPlaneStatus, out *v1beta2.AWSManagedControlPlaneStatus, s conversion.Scope) error { out.Network = in.Network - out.FailureDomains = *(*apiv1beta1.FailureDomains)(unsafe.Pointer(&in.FailureDomains)) + out.FailureDomains = *(*corev1beta1.FailureDomains)(unsafe.Pointer(&in.FailureDomains)) out.Bastion = (*apiv1beta2.Instance)(unsafe.Pointer(in.Bastion)) if err := Convert_v1beta1_OIDCProviderStatus_To_v1beta2_OIDCProviderStatus(&in.OIDCProvider, &out.OIDCProvider, s); err != nil { return err @@ -397,7 +395,7 @@ func autoConvert_v1beta1_AWSManagedControlPlaneStatus_To_v1beta2_AWSManagedContr out.Initialized = in.Initialized out.Ready = in.Ready out.FailureMessage = (*string)(unsafe.Pointer(in.FailureMessage)) - out.Conditions = *(*apiv1beta1.Conditions)(unsafe.Pointer(&in.Conditions)) + out.Conditions = *(*corev1beta1.Conditions)(unsafe.Pointer(&in.Conditions)) out.Addons = *(*[]v1beta2.AddonState)(unsafe.Pointer(&in.Addons)) if err := Convert_v1beta1_IdentityProviderStatus_To_v1beta2_IdentityProviderStatus(&in.IdentityProviderStatus, &out.IdentityProviderStatus, s); err != nil { return err @@ -412,7 +410,7 @@ func Convert_v1beta1_AWSManagedControlPlaneStatus_To_v1beta2_AWSManagedControlPl func autoConvert_v1beta2_AWSManagedControlPlaneStatus_To_v1beta1_AWSManagedControlPlaneStatus(in *v1beta2.AWSManagedControlPlaneStatus, out *AWSManagedControlPlaneStatus, s conversion.Scope) error { out.Network = in.Network - out.FailureDomains = *(*apiv1beta1.FailureDomains)(unsafe.Pointer(&in.FailureDomains)) + out.FailureDomains = *(*corev1beta1.FailureDomains)(unsafe.Pointer(&in.FailureDomains)) out.Bastion = (*apiv1beta2.Instance)(unsafe.Pointer(in.Bastion)) if err := Convert_v1beta2_OIDCProviderStatus_To_v1beta1_OIDCProviderStatus(&in.OIDCProvider, &out.OIDCProvider, s); err != nil { return err @@ -421,7 +419,7 @@ func autoConvert_v1beta2_AWSManagedControlPlaneStatus_To_v1beta1_AWSManagedContr out.Initialized = in.Initialized out.Ready = in.Ready out.FailureMessage = (*string)(unsafe.Pointer(in.FailureMessage)) - out.Conditions = *(*apiv1beta1.Conditions)(unsafe.Pointer(&in.Conditions)) + out.Conditions = *(*corev1beta1.Conditions)(unsafe.Pointer(&in.Conditions)) out.Addons = *(*[]AddonState)(unsafe.Pointer(&in.Addons)) if err := Convert_v1beta2_IdentityProviderStatus_To_v1beta1_IdentityProviderStatus(&in.IdentityProviderStatus, &out.IdentityProviderStatus, s); err != nil { return err diff --git a/controlplane/eks/api/v1beta1/zz_generated.deepcopy.go b/controlplane/eks/api/v1beta1/zz_generated.deepcopy.go index f6db3b2da0..b8b79a6d4a 100644 --- a/controlplane/eks/api/v1beta1/zz_generated.deepcopy.go +++ b/controlplane/eks/api/v1beta1/zz_generated.deepcopy.go @@ -24,7 +24,7 @@ import ( "k8s.io/api/core/v1" runtime "k8s.io/apimachinery/pkg/runtime" "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" - apiv1beta1 "sigs.k8s.io/cluster-api/api/v1beta1" + corev1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
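The registration changes in the generated conversion file above are the mechanical consequence of AccessConfig and UpgradePolicy existing only in v1beta2: conversion-gen demotes the spec and status functions from AddGeneratedConversionFunc to AddConversionFunc, flags the new fields with "requires manual conversion: does not exist in peer-type", and the hand-written ConvertTo earlier restores them from annotation data. The down-conversion counterpart follows the standard CAPI shape; a sketch under that assumption, not an excerpt from this change:

func (r *AWSManagedControlPlane) ConvertFrom(srcRaw conversion.Hub) error {
	src := srcRaw.(*ekscontrolplanev1.AWSManagedControlPlane)
	if err := Convert_v1beta2_AWSManagedControlPlane_To_v1beta1_AWSManagedControlPlane(src, r, nil); err != nil {
		return err
	}
	// Stash v1beta2-only fields (AccessConfig, UpgradePolicy, ...) in an
	// annotation so a later ConvertTo can restore them losslessly.
	return utilconversion.MarshalData(src, r)
}

utilconversion.MarshalData and its UnmarshalData counterpart are the sigs.k8s.io/cluster-api/util/conversion helpers this package already imports.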
@@ -190,7 +190,7 @@ func (in *AWSManagedControlPlaneStatus) DeepCopyInto(out *AWSManagedControlPlane in.Network.DeepCopyInto(&out.Network) if in.FailureDomains != nil { in, out := &in.FailureDomains, &out.FailureDomains - *out = make(apiv1beta1.FailureDomains, len(*in)) + *out = make(corev1beta1.FailureDomains, len(*in)) for key, val := range *in { (*out)[key] = *val.DeepCopy() } @@ -213,7 +213,7 @@ func (in *AWSManagedControlPlaneStatus) DeepCopyInto(out *AWSManagedControlPlane } if in.Conditions != nil { in, out := &in.Conditions, &out.Conditions - *out = make(apiv1beta1.Conditions, len(*in)) + *out = make(corev1beta1.Conditions, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } diff --git a/controlplane/eks/api/v1beta2/awsmanagedcontrolplane_types.go b/controlplane/eks/api/v1beta2/awsmanagedcontrolplane_types.go index a36b35dda3..a904069de9 100644 --- a/controlplane/eks/api/v1beta2/awsmanagedcontrolplane_types.go +++ b/controlplane/eks/api/v1beta2/awsmanagedcontrolplane_types.go @@ -21,7 +21,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) const ( @@ -137,7 +137,7 @@ type AWSManagedControlPlaneSpec struct { //nolint: maligned // ControlPlaneEndpoint represents the endpoint used to communicate with the control plane. // +optional - ControlPlaneEndpoint clusterv1.APIEndpoint `json:"controlPlaneEndpoint"` + ControlPlaneEndpoint clusterv1beta1.APIEndpoint `json:"controlPlaneEndpoint"` // ImageLookupFormat is the AMI naming format to look up machine images when // a machine does not specify an AMI. When set, this will be used for all @@ -187,11 +187,15 @@ type AWSManagedControlPlaneSpec struct { //nolint: maligned // +optional Addons *[]Addon `json:"addons,omitempty"` - // IdentityProviderconfig is used to specify the oidc provider config + // OIDCIdentityProviderConfig is used to specify the OIDC provider config // to be attached with this eks cluster // +optional OIDCIdentityProviderConfig *OIDCIdentityProviderConfig `json:"oidcIdentityProviderConfig,omitempty"` + // AccessConfig specifies the access configuration information for the cluster + // +optional + AccessConfig *AccessConfig `json:"accessConfig,omitempty"` + // VpcCni is used to set configuration options for the VPC CNI plugin // +optional VpcCni VpcCni `json:"vpcCni,omitempty"` @@ -208,6 +212,15 @@ type AWSManagedControlPlaneSpec struct { //nolint: maligned // KubeProxy defines managed attributes of the kube-proxy daemonset KubeProxy KubeProxy `json:"kubeProxy,omitempty"` + + // The cluster upgrade policy to use for the cluster. + // (Official AWS docs for this policy: https://docs.aws.amazon.com/eks/latest/userguide/view-upgrade-policy.html) + // `extended` upgrade policy indicates that the cluster will enter into extended support once the Kubernetes version reaches end of standard support. You will incur extended support charges with this setting. You can upgrade your cluster to a standard supported Kubernetes version to stop incurring extended support charges. + // `standard` upgrade policy indicates that the cluster is eligible for automatic upgrade at the end of standard support. You will not incur extended support charges with this setting but your EKS cluster will automatically upgrade to the next supported Kubernetes version in standard support. 
+ // If omitted, new clusters will use the AWS default upgrade policy (which at the time of writing is "extended") and existing clusters will have their upgrade policy unchanged. + // +kubebuilder:validation:Enum=extended;standard + // +optional + UpgradePolicy UpgradePolicy `json:"upgradePolicy,omitempty"` } // KubeProxy specifies how the kube-proxy daemonset is managed. @@ -248,6 +261,21 @@ type EndpointAccess struct { Private *bool `json:"private,omitempty"` } +// AccessConfig represents the access configuration information for the cluster +type AccessConfig struct { + // AuthenticationMode specifies the desired authentication mode for the cluster + // Defaults to config_map + // +kubebuilder:default=config_map + // +kubebuilder:validation:Enum=config_map;api;api_and_config_map + AuthenticationMode EKSAuthenticationMode `json:"authenticationMode,omitempty"` + + // BootstrapClusterCreatorAdminPermissions grants cluster admin permissions + // to the IAM identity creating the cluster. Only applied during creation, + // ignored when updating existing clusters. Defaults to true. + // +kubebuilder:default=true + BootstrapClusterCreatorAdminPermissions *bool `json:"bootstrapClusterCreatorAdminPermissions,omitempty"` +} + // EncryptionConfig specifies the encryption configuration for the EKS clsuter. type EncryptionConfig struct { // Provider specifies the ARN or alias of the CMK (in AWS KMS) @@ -280,7 +308,7 @@ type AWSManagedControlPlaneStatus struct { Network infrav1.NetworkStatus `json:"networkStatus,omitempty"` // FailureDomains specifies a list fo available availability zones that can be used // +optional - FailureDomains clusterv1.FailureDomains `json:"failureDomains,omitempty"` + FailureDomains clusterv1beta1.FailureDomains `json:"failureDomains,omitempty"` // Bastion holds details of the instance that is used as a bastion jump box // +optional Bastion *infrav1.Instance `json:"bastion,omitempty"` @@ -304,7 +332,7 @@ type AWSManagedControlPlaneStatus struct { // +optional FailureMessage *string `json:"failureMessage,omitempty"` // Conditions specifies the cpnditions for the managed control plane - Conditions clusterv1.Conditions `json:"conditions,omitempty"` + Conditions clusterv1beta1.Conditions `json:"conditions,omitempty"` // Addons holds the current status of the EKS addons // +optional Addons []AddonState `json:"addons,omitempty"` @@ -347,12 +375,12 @@ type AWSManagedControlPlaneList struct { } // GetConditions returns the control planes conditions. -func (r *AWSManagedControlPlane) GetConditions() clusterv1.Conditions { +func (r *AWSManagedControlPlane) GetConditions() clusterv1beta1.Conditions { return r.Status.Conditions } // SetConditions sets the status conditions for the AWSManagedControlPlane. -func (r *AWSManagedControlPlane) SetConditions(conditions clusterv1.Conditions) { +func (r *AWSManagedControlPlane) SetConditions(conditions clusterv1beta1.Conditions) { r.Status.Conditions = conditions } diff --git a/controlplane/eks/api/v1beta2/awsmanagedcontrolplane_webhook.go b/controlplane/eks/api/v1beta2/awsmanagedcontrolplane_webhook.go index ffb4e891b8..5554eff7c1 100644 --- a/controlplane/eks/api/v1beta2/awsmanagedcontrolplane_webhook.go +++ b/controlplane/eks/api/v1beta2/awsmanagedcontrolplane_webhook.go @@ -107,6 +107,7 @@ func (*awsManagedControlPlaneWebhook) ValidateCreate(_ context.Context, obj runt allErrs = append(allErrs, r.Spec.AdditionalTags.Validate()...) allErrs = append(allErrs, r.validateNetwork()...) 
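Taken together, the new spec surface is small: an optional AccessConfig (authentication mode plus a bootstrap-admin flag) and an optional UpgradePolicy enum. A hedged usage example (field names come from the types above; the surrounding object is illustrative):

mcp := &ekscontrolplanev1.AWSManagedControlPlane{
	Spec: ekscontrolplanev1.AWSManagedControlPlaneSpec{
		EKSClusterName: "my-cluster",
		AccessConfig: &ekscontrolplanev1.AccessConfig{
			AuthenticationMode:                      ekscontrolplanev1.EKSAuthenticationModeAPIAndConfigMap,
			BootstrapClusterCreatorAdminPermissions: ptr.To(true),
		},
		UpgradePolicy: ekscontrolplanev1.UpgradePolicyStandard,
	},
}

The webhook hunk continuing below is what polices this surface: validateAccessConfigCreate rejects config_map mode with bootstrap admin permissions disabled, and validateAccessConfigUpdate makes the authentication mode ratchet-only.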
allErrs = append(allErrs, r.validatePrivateDNSHostnameTypeOnLaunch()...) + allErrs = append(allErrs, r.validateAccessConfigCreate()...) if len(allErrs) == 0 { return nil, nil @@ -140,6 +141,7 @@ func (*awsManagedControlPlaneWebhook) ValidateUpdate(ctx context.Context, oldObj allErrs = append(allErrs, r.validateEKSClusterNameSame(oldAWSManagedControlplane)...) allErrs = append(allErrs, r.validateEKSVersion(oldAWSManagedControlplane)...) allErrs = append(allErrs, r.Spec.Bastion.Validate()...) + allErrs = append(allErrs, r.validateAccessConfigUpdate(oldAWSManagedControlplane)...) allErrs = append(allErrs, r.validateIAMAuthConfig()...) allErrs = append(allErrs, r.validateSecondaryCIDR()...) allErrs = append(allErrs, r.validateEKSAddons()...) @@ -318,6 +320,53 @@ func validateEKSAddons(eksVersion *string, networkSpec infrav1.NetworkSpec, addo return allErrs } +func (r *AWSManagedControlPlane) validateAccessConfigUpdate(old *AWSManagedControlPlane) field.ErrorList { + var allErrs field.ErrorList + + // If accessConfig is already set, do not allow removal of it. + if old.Spec.AccessConfig != nil && r.Spec.AccessConfig == nil { + allErrs = append(allErrs, + field.Invalid(field.NewPath("spec", "accessConfig"), r.Spec.AccessConfig, "removing AccessConfig is not allowed after it has been enabled"), + ) + } + + // AuthenticationMode is ratcheting - do not allow downgrades + if old.Spec.AccessConfig != nil && r.Spec.AccessConfig != nil && + old.Spec.AccessConfig.AuthenticationMode != r.Spec.AccessConfig.AuthenticationMode && + ((old.Spec.AccessConfig.AuthenticationMode == EKSAuthenticationModeAPIAndConfigMap && r.Spec.AccessConfig.AuthenticationMode == EKSAuthenticationModeConfigMap) || + old.Spec.AccessConfig.AuthenticationMode == EKSAuthenticationModeAPI) { + allErrs = append(allErrs, + field.Invalid(field.NewPath("spec", "accessConfig", "authenticationMode"), r.Spec.AccessConfig.AuthenticationMode, "downgrading authentication mode is not allowed after it has been enabled"), + ) + } + + // BootstrapClusterCreatorAdminPermissions only applies on create, but changes should not invalidate updates + if old.Spec.AccessConfig != nil && r.Spec.AccessConfig != nil && + old.Spec.AccessConfig.BootstrapClusterCreatorAdminPermissions != r.Spec.AccessConfig.BootstrapClusterCreatorAdminPermissions { + mcpLog.Info("Ignoring changes to BootstrapClusterCreatorAdminPermissions on cluster update", "old", old.Spec.AccessConfig.BootstrapClusterCreatorAdminPermissions, "new", r.Spec.AccessConfig.BootstrapClusterCreatorAdminPermissions) + } + + return allErrs +} + +func (r *AWSManagedControlPlane) validateAccessConfigCreate() field.ErrorList { + var allErrs field.ErrorList + + if r.Spec.AccessConfig != nil { + if r.Spec.AccessConfig.AuthenticationMode == EKSAuthenticationModeConfigMap && + r.Spec.AccessConfig.BootstrapClusterCreatorAdminPermissions != nil && + !*r.Spec.AccessConfig.BootstrapClusterCreatorAdminPermissions { + allErrs = append(allErrs, + field.Invalid(field.NewPath("spec", "accessConfig", "bootstrapClusterCreatorAdminPermissions"), + *r.Spec.AccessConfig.BootstrapClusterCreatorAdminPermissions, + "bootstrapClusterCreatorAdminPermissions must be true if cluster authentication mode is set to config_map"), + ) + } + } + + return allErrs +} + func (r *AWSManagedControlPlane) validateIAMAuthConfig() field.ErrorList { return validateIAMAuthConfig(r.Spec.IAMAuthenticatorConfig, field.NewPath("spec.iamAuthenticatorConfig")) } diff --git a/controlplane/eks/api/v1beta2/awsmanagedcontrolplane_webhook_test.go 
b/controlplane/eks/api/v1beta2/awsmanagedcontrolplane_webhook_test.go index 276faa5b09..40de7b369b 100644 --- a/controlplane/eks/api/v1beta2/awsmanagedcontrolplane_webhook_test.go +++ b/controlplane/eks/api/v1beta2/awsmanagedcontrolplane_webhook_test.go @@ -179,6 +179,7 @@ func TestWebhookCreate(t *testing.T) { secondaryCidr *string secondaryCidrBlocks []infrav1.VpcCidrBlock kubeProxy KubeProxy + accessConfig *AccessConfig }{ { name: "ekscluster specified", @@ -322,6 +323,47 @@ func TestWebhookCreate(t *testing.T) { Disable: true, }, }, + { + name: "BootstrapClusterCreatorAdminPermissions true with EKSAuthenticationModeConfigMap", + eksClusterName: "default_cluster1", + eksVersion: "v1.19", + expectError: false, + accessConfig: &AccessConfig{ + AuthenticationMode: EKSAuthenticationModeConfigMap, + BootstrapClusterCreatorAdminPermissions: ptr.To(true), + }, + }, + { + name: "BootstrapClusterCreatorAdminPermissions false with EKSAuthenticationModeConfigMap", + eksClusterName: "default_cluster1", + eksVersion: "v1.19", + expectError: true, + expectErrorToContain: "bootstrapClusterCreatorAdminPermissions must be true if cluster authentication mode is set to config_map", + accessConfig: &AccessConfig{ + AuthenticationMode: EKSAuthenticationModeConfigMap, + BootstrapClusterCreatorAdminPermissions: ptr.To(false), + }, + }, + { + name: "BootstrapClusterCreatorAdminPermissions false with EKSAuthenticationModeAPIAndConfigMap", + eksClusterName: "default_cluster1", + eksVersion: "v1.19", + expectError: false, + accessConfig: &AccessConfig{ + AuthenticationMode: EKSAuthenticationModeAPIAndConfigMap, + BootstrapClusterCreatorAdminPermissions: ptr.To(false), + }, + }, + { + name: "BootstrapClusterCreatorAdminPermissions false with EKSAuthenticationModeAPI", + eksClusterName: "default_cluster1", + eksVersion: "v1.19", + expectError: false, + accessConfig: &AccessConfig{ + AuthenticationMode: EKSAuthenticationModeAPI, + BootstrapClusterCreatorAdminPermissions: ptr.To(false), + }, + }, } for _, tc := range tests { @@ -365,6 +407,9 @@ func TestWebhookCreate(t *testing.T) { if tc.secondaryCidr != nil { mcp.Spec.SecondaryCidrBlock = tc.secondaryCidr } + if tc.accessConfig != nil { + mcp.Spec.AccessConfig = tc.accessConfig + } err := testEnv.Create(ctx, mcp) @@ -603,6 +648,112 @@ func TestWebhookUpdate(t *testing.T) { }, expectError: false, }, + { + name: "no change in access config", + oldClusterSpec: AWSManagedControlPlaneSpec{ + EKSClusterName: "default_cluster1", + AccessConfig: &AccessConfig{ + AuthenticationMode: EKSAuthenticationModeConfigMap, + }, + }, + newClusterSpec: AWSManagedControlPlaneSpec{ + EKSClusterName: "default_cluster1", + AccessConfig: &AccessConfig{ + AuthenticationMode: EKSAuthenticationModeConfigMap, + }, + }, + expectError: false, + }, + { + name: "change in access config to nil", + oldClusterSpec: AWSManagedControlPlaneSpec{ + EKSClusterName: "default_cluster1", + AccessConfig: &AccessConfig{ + AuthenticationMode: EKSAuthenticationModeConfigMap, + }, + }, + newClusterSpec: AWSManagedControlPlaneSpec{ + EKSClusterName: "default_cluster1", + }, + expectError: true, + }, + { + name: "change in access config from nil to valid", + oldClusterSpec: AWSManagedControlPlaneSpec{ + EKSClusterName: "default_cluster1", + }, + newClusterSpec: AWSManagedControlPlaneSpec{ + EKSClusterName: "default_cluster1", + AccessConfig: &AccessConfig{ + AuthenticationMode: EKSAuthenticationModeConfigMap, + }, + }, + expectError: false, + }, + { + name: "change in access config auth mode from ApiAndConfigMap to 
API is allowed", + oldClusterSpec: AWSManagedControlPlaneSpec{ + EKSClusterName: "default_cluster1", + AccessConfig: &AccessConfig{ + AuthenticationMode: EKSAuthenticationModeAPIAndConfigMap, + }, + }, + newClusterSpec: AWSManagedControlPlaneSpec{ + EKSClusterName: "default_cluster1", + AccessConfig: &AccessConfig{ + AuthenticationMode: EKSAuthenticationModeAPI, + }, + }, + expectError: false, + }, + { + name: "change in access config auth mode from API to Config Map is denied", + oldClusterSpec: AWSManagedControlPlaneSpec{ + EKSClusterName: "default_cluster1", + AccessConfig: &AccessConfig{ + AuthenticationMode: EKSAuthenticationModeAPI, + }, + }, + newClusterSpec: AWSManagedControlPlaneSpec{ + EKSClusterName: "default_cluster1", + AccessConfig: &AccessConfig{ + AuthenticationMode: EKSAuthenticationModeConfigMap, + }, + }, + expectError: true, + }, + { + name: "change in access config auth mode from APIAndConfigMap to Config Map is denied", + oldClusterSpec: AWSManagedControlPlaneSpec{ + EKSClusterName: "default_cluster1", + AccessConfig: &AccessConfig{ + AuthenticationMode: EKSAuthenticationModeAPIAndConfigMap, + }, + }, + newClusterSpec: AWSManagedControlPlaneSpec{ + EKSClusterName: "default_cluster1", + AccessConfig: &AccessConfig{ + AuthenticationMode: EKSAuthenticationModeConfigMap, + }, + }, + expectError: true, + }, + { + name: "change in access config bootstrap admin permissions is ignored", + oldClusterSpec: AWSManagedControlPlaneSpec{ + EKSClusterName: "default_cluster1", + AccessConfig: &AccessConfig{ + BootstrapClusterCreatorAdminPermissions: ptr.To(true), + }, + }, + newClusterSpec: AWSManagedControlPlaneSpec{ + EKSClusterName: "default_cluster1", + AccessConfig: &AccessConfig{ + BootstrapClusterCreatorAdminPermissions: ptr.To(false), + }, + }, + expectError: false, + }, { name: "change in encryption config to nil", oldClusterSpec: AWSManagedControlPlaneSpec{ diff --git a/controlplane/eks/api/v1beta2/conditions_consts.go b/controlplane/eks/api/v1beta2/conditions_consts.go index fc8fa66721..d711b0d11f 100644 --- a/controlplane/eks/api/v1beta2/conditions_consts.go +++ b/controlplane/eks/api/v1beta2/conditions_consts.go @@ -16,45 +16,45 @@ limitations under the License. package v1beta2 -import clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" +import clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" const ( // EKSControlPlaneReadyCondition condition reports on the successful reconciliation of eks control plane. - EKSControlPlaneReadyCondition clusterv1.ConditionType = "EKSControlPlaneReady" + EKSControlPlaneReadyCondition clusterv1beta1.ConditionType = "EKSControlPlaneReady" // EKSControlPlaneCreatingCondition condition reports on whether the eks // control plane is creating. - EKSControlPlaneCreatingCondition clusterv1.ConditionType = "EKSControlPlaneCreating" + EKSControlPlaneCreatingCondition clusterv1beta1.ConditionType = "EKSControlPlaneCreating" // EKSControlPlaneUpdatingCondition condition reports on whether the eks // control plane is updating. - EKSControlPlaneUpdatingCondition clusterv1.ConditionType = "EKSControlPlaneUpdating" + EKSControlPlaneUpdatingCondition clusterv1beta1.ConditionType = "EKSControlPlaneUpdating" // EKSControlPlaneReconciliationFailedReason used to report failures while reconciling EKS control plane. EKSControlPlaneReconciliationFailedReason = "EKSControlPlaneReconciliationFailed" ) const ( // IAMControlPlaneRolesReadyCondition condition reports on the successful reconciliation of eks control plane iam roles. 
- IAMControlPlaneRolesReadyCondition clusterv1.ConditionType = "IAMControlPlaneRolesReady" + IAMControlPlaneRolesReadyCondition clusterv1beta1.ConditionType = "IAMControlPlaneRolesReady" // IAMControlPlaneRolesReconciliationFailedReason used to report failures while reconciling EKS control plane iam roles. IAMControlPlaneRolesReconciliationFailedReason = "IAMControlPlaneRolesReconciliationFailed" ) const ( // IAMAuthenticatorConfiguredCondition condition reports on the successful reconciliation of aws-iam-authenticator config. - IAMAuthenticatorConfiguredCondition clusterv1.ConditionType = "IAMAuthenticatorConfigured" + IAMAuthenticatorConfiguredCondition clusterv1beta1.ConditionType = "IAMAuthenticatorConfigured" // IAMAuthenticatorConfigurationFailedReason used to report failures while reconciling the aws-iam-authenticator config. IAMAuthenticatorConfigurationFailedReason = "IAMAuthenticatorConfigurationFailed" ) const ( // EKSAddonsConfiguredCondition condition reports on the successful reconciliation of EKS addons. - EKSAddonsConfiguredCondition clusterv1.ConditionType = "EKSAddonsConfigured" + EKSAddonsConfiguredCondition clusterv1beta1.ConditionType = "EKSAddonsConfigured" // EKSAddonsConfiguredFailedReason used to report failures while reconciling the EKS addons. EKSAddonsConfiguredFailedReason = "EKSAddonsConfiguredFailed" ) const ( // EKSIdentityProviderConfiguredCondition condition reports on the successful association of identity provider config. - EKSIdentityProviderConfiguredCondition clusterv1.ConditionType = "EKSIdentityProviderConfigured" + EKSIdentityProviderConfiguredCondition clusterv1beta1.ConditionType = "EKSIdentityProviderConfigured" // EKSIdentityProviderConfiguredFailedReason used to report failures while reconciling the identity provider config association. EKSIdentityProviderConfiguredFailedReason = "EKSIdentityProviderConfiguredFailed" ) diff --git a/controlplane/eks/api/v1beta2/types.go b/controlplane/eks/api/v1beta2/types.go index 622e4b9c3d..60cd4b454d 100644 --- a/controlplane/eks/api/v1beta2/types.go +++ b/controlplane/eks/api/v1beta2/types.go @@ -18,6 +18,7 @@ package v1beta2 import ( "fmt" + "strings" ekstypes "github.com/aws/aws-sdk-go-v2/service/eks/types" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -79,6 +80,26 @@ var ( EKSTokenMethodAWSCli = EKSTokenMethod("aws-cli") ) +// EKSAuthenticationMode defines the authentication mode for the cluster +type EKSAuthenticationMode string + +// APIValue returns the corresponding EKS API value for the authentication mode +func (e EKSAuthenticationMode) APIValue() ekstypes.AuthenticationMode { + return ekstypes.AuthenticationMode(strings.ToUpper(string(e))) +} + +var ( + // EKSAuthenticationModeConfigMap indicates that only `aws-auth` ConfigMap will be used for authentication + EKSAuthenticationModeConfigMap = EKSAuthenticationMode("config_map") + + // EKSAuthenticationModeAPI indicates that only AWS Access Entries will be used for authentication + EKSAuthenticationModeAPI = EKSAuthenticationMode("api") + + // EKSAuthenticationModeAPIAndConfigMap indicates that both `aws-auth` ConfigMap and AWS Access Entries will + // be used for authentication + EKSAuthenticationModeAPIAndConfigMap = EKSAuthenticationMode("api_and_config_map") +) + var ( // DefaultEKSControlPlaneRole is the name of the default IAM role to use for the EKS control plane // if no other role is supplied in the spec and if iam role creation is not enabled. 
The default @@ -220,6 +241,24 @@ type AddonIssue struct { ResourceIDs []string `json:"resourceIds,omitempty"` } +// UpgradePolicy defines the support policy to use for the cluster. +type UpgradePolicy string + +var ( + // UpgradePolicyExtended indicates that the cluster will enter into extended support once the Kubernetes version reaches end of standard support. + // You will incur extended support charges with this setting. + // You can upgrade your cluster to a standard supported Kubernetes version to stop incurring extended support charges. + UpgradePolicyExtended = UpgradePolicy("extended") + + // UpgradePolicyStandard indicates that the cluster is eligible for automatic upgrade at the end of standard support. + // You will not incur extended support charges with this setting but your EKS cluster will automatically upgrade to the next supported Kubernetes version in standard support. + UpgradePolicyStandard = UpgradePolicy("standard") +) + +func (e UpgradePolicy) String() string { + return string(e) +} + const ( // SecurityGroupCluster is the security group for communication between EKS // control plane and managed node groups. diff --git a/controlplane/eks/api/v1beta2/zz_generated.deepcopy.go b/controlplane/eks/api/v1beta2/zz_generated.deepcopy.go index 807613dc0d..0e1b766d8b 100644 --- a/controlplane/eks/api/v1beta2/zz_generated.deepcopy.go +++ b/controlplane/eks/api/v1beta2/zz_generated.deepcopy.go @@ -24,7 +24,7 @@ import ( "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" apiv1beta2 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" - "sigs.k8s.io/cluster-api/api/v1beta1" + "sigs.k8s.io/cluster-api/api/core/v1beta1" ) // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. @@ -170,6 +170,11 @@ func (in *AWSManagedControlPlaneSpec) DeepCopyInto(out *AWSManagedControlPlaneSp *out = new(OIDCIdentityProviderConfig) (*in).DeepCopyInto(*out) } + if in.AccessConfig != nil { + in, out := &in.AccessConfig, &out.AccessConfig + *out = new(AccessConfig) + (*in).DeepCopyInto(*out) + } in.VpcCni.DeepCopyInto(&out.VpcCni) out.KubeProxy = in.KubeProxy } @@ -333,6 +338,26 @@ func (in *AWSManagedControlPlaneTemplateSpec) DeepCopy() *AWSManagedControlPlane return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AccessConfig) DeepCopyInto(out *AccessConfig) { + *out = *in + if in.BootstrapClusterCreatorAdminPermissions != nil { + in, out := &in.BootstrapClusterCreatorAdminPermissions, &out.BootstrapClusterCreatorAdminPermissions + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccessConfig. +func (in *AccessConfig) DeepCopy() *AccessConfig { + if in == nil { + return nil + } + out := new(AccessConfig) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
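A note on EKSAuthenticationMode.APIValue above: the kubebuilder enum values are deliberately the lower-case spellings of the EKS API values, so the mapping is a single strings.ToUpper:

mode := EKSAuthenticationModeAPIAndConfigMap
_ = mode.APIValue() // ekstypes.AuthenticationMode("API_AND_CONFIG_MAP")

UpgradePolicy, by contrast, needs only the String() accessor at this layer; its values are already plain strings.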
func (in *Addon) DeepCopyInto(out *Addon) { *out = *in diff --git a/controlplane/eks/controllers/awsmanagedcontrolplane_controller.go b/controlplane/eks/controllers/awsmanagedcontrolplane_controller.go index 1a3a3583d5..53f2c1f73b 100644 --- a/controlplane/eks/controllers/awsmanagedcontrolplane_controller.go +++ b/controlplane/eks/controllers/awsmanagedcontrolplane_controller.go @@ -27,6 +27,7 @@ import ( "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/tools/record" "k8s.io/klog/v2" + "k8s.io/utils/ptr" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller" @@ -52,9 +53,10 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/securitygroup" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger" "sigs.k8s.io/cluster-api-provider-aws/v2/util/paused" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/cluster-api/util" - "sigs.k8s.io/cluster-api/util/conditions" + v1beta1conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" "sigs.k8s.io/cluster-api/util/predicates" ) @@ -178,7 +180,7 @@ func (r *AWSManagedControlPlaneReconciler) SetupWithManager(ctx context.Context, if err = c.Watch( source.Kind[client.Object](mgr.GetCache(), &clusterv1.Cluster{}, handler.EnqueueRequestsFromMapFunc(util.ClusterToInfrastructureMapFunc(ctx, awsManagedControlPlane.GroupVersionKind(), mgr.GetClient(), &ekscontrolplanev1.AWSManagedControlPlane{})), - predicates.ClusterPausedTransitionsOrInfrastructureReady(mgr.GetScheme(), log.GetLogger())), + predicates.ClusterPausedTransitionsOrInfrastructureProvisioned(mgr.GetScheme(), log.GetLogger())), ); err != nil { return fmt.Errorf("failed adding a watch for ready clusters: %w", err) } @@ -258,7 +260,7 @@ func (r *AWSManagedControlPlaneReconciler) Reconcile(ctx context.Context, req ct // Always close the scope defer func() { - applicableConditions := []clusterv1.ConditionType{ + applicableConditions := []clusterv1beta1.ConditionType{ ekscontrolplanev1.EKSControlPlaneReadyCondition, ekscontrolplanev1.IAMControlPlaneRolesReadyCondition, ekscontrolplanev1.IAMAuthenticatorConfiguredCondition, @@ -283,7 +285,7 @@ func (r *AWSManagedControlPlaneReconciler) Reconcile(ctx context.Context, req ct } } - conditions.SetSummary(managedScope.ControlPlane, conditions.WithConditions(applicableConditions...), conditions.WithStepCounter()) + v1beta1conditions.SetSummary(managedScope.ControlPlane, v1beta1conditions.WithConditions(applicableConditions...), v1beta1conditions.WithStepCounter()) if err := managedScope.Close(); err != nil && reterr == nil { reterr = err @@ -302,7 +304,7 @@ func (r *AWSManagedControlPlaneReconciler) Reconcile(ctx context.Context, req ct func (r *AWSManagedControlPlaneReconciler) reconcileNormal(ctx context.Context, managedScope *scope.ManagedControlPlaneScope) (res ctrl.Result, reterr error) { managedScope.Info("Reconciling AWSManagedControlPlane") - if managedScope.Cluster.Spec.InfrastructureRef == nil { + if !managedScope.Cluster.Spec.InfrastructureRef.IsDefined() { managedScope.Info("InfrastructureRef not set, skipping reconciliation") return ctrl.Result{}, nil } @@ -312,7 +314,7 @@ func (r *AWSManagedControlPlaneReconciler) reconcileNormal(ctx context.Context, // infrastructureRef and controlplaneRef. 
if managedScope.Cluster.Spec.InfrastructureRef.Kind != awsManagedControlPlaneKind { // Wait for the cluster infrastructure to be ready before creating machines - if !managedScope.Cluster.Status.InfrastructureReady { + if !ptr.Deref(managedScope.Cluster.Status.Initialization.InfrastructureProvisioned, false) { managedScope.Info("Cluster infrastructure is not ready yet") return ctrl.Result{RequeueAfter: r.WaitInfraPeriod}, nil } @@ -339,12 +341,12 @@ func (r *AWSManagedControlPlaneReconciler) reconcileNormal(ctx context.Context, } if err := sgService.ReconcileSecurityGroups(); err != nil { - conditions.MarkFalse(awsManagedControlPlane, infrav1.ClusterSecurityGroupsReadyCondition, infrav1.ClusterSecurityGroupReconciliationFailedReason, clusterv1.ConditionSeverityError, "%s", err.Error()) + v1beta1conditions.MarkFalse(awsManagedControlPlane, infrav1.ClusterSecurityGroupsReadyCondition, infrav1.ClusterSecurityGroupReconciliationFailedReason, clusterv1beta1.ConditionSeverityError, "%s", err.Error()) return reconcile.Result{}, errors.Wrapf(err, "failed to reconcile general security groups for AWSManagedControlPlane %s/%s", awsManagedControlPlane.Namespace, awsManagedControlPlane.Name) } if err := ec2Service.ReconcileBastion(); err != nil { - conditions.MarkFalse(awsManagedControlPlane, infrav1.BastionHostReadyCondition, infrav1.BastionHostFailedReason, clusterv1.ConditionSeverityError, "%s", err.Error()) + v1beta1conditions.MarkFalse(awsManagedControlPlane, infrav1.BastionHostReadyCondition, infrav1.BastionHostFailedReason, clusterv1beta1.ConditionSeverityError, "%s", err.Error()) return reconcile.Result{}, fmt.Errorf("failed to reconcile bastion host for AWSManagedControlPlane %s/%s: %w", awsManagedControlPlane.Namespace, awsManagedControlPlane.Name, err) } @@ -353,7 +355,7 @@ func (r *AWSManagedControlPlaneReconciler) reconcileNormal(ctx context.Context, } if err := awsnodeService.ReconcileCNI(ctx); err != nil { - conditions.MarkFalse(managedScope.InfraCluster(), infrav1.SecondaryCidrsReadyCondition, infrav1.SecondaryCidrReconciliationFailedReason, clusterv1.ConditionSeverityError, "%s", err.Error()) + v1beta1conditions.MarkFalse(managedScope.InfraCluster(), infrav1.SecondaryCidrsReadyCondition, infrav1.SecondaryCidrReconciliationFailedReason, clusterv1beta1.ConditionSeverityError, "%s", err.Error()) return reconcile.Result{}, fmt.Errorf("failed to reconcile control plane for AWSManagedControlPlane %s/%s: %w", awsManagedControlPlane.Namespace, awsManagedControlPlane.Name, err) } @@ -369,14 +371,14 @@ func (r *AWSManagedControlPlaneReconciler) reconcileNormal(ctx context.Context, } } if err := authService.ReconcileIAMAuthenticator(ctx); err != nil { - conditions.MarkFalse(awsManagedControlPlane, ekscontrolplanev1.IAMAuthenticatorConfiguredCondition, ekscontrolplanev1.IAMAuthenticatorConfigurationFailedReason, clusterv1.ConditionSeverityError, "%s", err.Error()) + v1beta1conditions.MarkFalse(awsManagedControlPlane, ekscontrolplanev1.IAMAuthenticatorConfiguredCondition, ekscontrolplanev1.IAMAuthenticatorConfigurationFailedReason, clusterv1beta1.ConditionSeverityError, "%s", err.Error()) return reconcile.Result{}, errors.Wrapf(err, "failed to reconcile aws-iam-authenticator config for AWSManagedControlPlane %s/%s", awsManagedControlPlane.Namespace, awsManagedControlPlane.Name) } - conditions.MarkTrue(awsManagedControlPlane, ekscontrolplanev1.IAMAuthenticatorConfiguredCondition) + v1beta1conditions.MarkTrue(awsManagedControlPlane, ekscontrolplanev1.IAMAuthenticatorConfiguredCondition) for _, subnet 
:= range managedScope.Subnets().FilterPrivate() { - managedScope.SetFailureDomain(subnet.AvailabilityZone, clusterv1.FailureDomainSpec{ - ControlPlane: true, + managedScope.SetFailureDomain(subnet.AvailabilityZone, clusterv1.FailureDomain{ + ControlPlane: ptr.To(true), }) } @@ -451,8 +453,8 @@ func (r *AWSManagedControlPlaneReconciler) ClusterToAWSManagedControlPlane(o cli } controlPlaneRef := c.Spec.ControlPlaneRef - if controlPlaneRef != nil && controlPlaneRef.Kind == awsManagedControlPlaneKind { - return []ctrl.Request{{NamespacedName: client.ObjectKey{Namespace: controlPlaneRef.Namespace, Name: controlPlaneRef.Name}}} + if controlPlaneRef.Kind == awsManagedControlPlaneKind { + return []ctrl.Request{{NamespacedName: client.ObjectKey{Namespace: c.Namespace, Name: controlPlaneRef.Name}}} } return nil @@ -467,7 +469,7 @@ func (r *AWSManagedControlPlaneReconciler) dependencyCount(ctx context.Context, listOptions := []client.ListOption{ client.InNamespace(namespace), - client.MatchingLabels(map[string]string{clusterv1.ClusterNameLabel: clusterName}), + client.MatchingLabels(map[string]string{clusterv1beta1.ClusterNameLabel: clusterName}), } dependencies := 0 @@ -522,8 +524,8 @@ func (r *AWSManagedControlPlaneReconciler) managedClusterToManagedControlPlane(_ } controlPlaneRef := cluster.Spec.ControlPlaneRef - if controlPlaneRef == nil || controlPlaneRef.Kind != awsManagedControlPlaneKind { - log.Debug("ControlPlaneRef is nil or not AWSManagedControlPlane, skipping mapping") + if controlPlaneRef.Kind != awsManagedControlPlaneKind { + log.Debug("ControlPlaneRef is not defined or not AWSManagedControlPlane, skipping mapping") return nil } @@ -531,7 +533,7 @@ func (r *AWSManagedControlPlaneReconciler) managedClusterToManagedControlPlane(_ { NamespacedName: types.NamespacedName{ Name: controlPlaneRef.Name, - Namespace: controlPlaneRef.Namespace, + Namespace: cluster.Namespace, }, }, } diff --git a/controlplane/eks/controllers/awsmanagedcontrolplane_controller_test.go b/controlplane/eks/controllers/awsmanagedcontrolplane_controller_test.go index 483992024d..b5d3b68455 100644 --- a/controlplane/eks/controllers/awsmanagedcontrolplane_controller_test.go +++ b/controlplane/eks/controllers/awsmanagedcontrolplane_controller_test.go @@ -39,6 +39,7 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/tools/record" + "k8s.io/utils/ptr" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" @@ -56,7 +57,7 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/securitygroup" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/sts/mock_stsiface" "sigs.k8s.io/cluster-api-provider-aws/v2/test/mocks" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/patch" ) @@ -138,7 +139,8 @@ func TestAWSManagedControlPlaneReconcilerIntegrationTests(t *testing.T) { mockedEKSCluster(ctx, g, eksMock.EXPECT(), iamMock.EXPECT(), ec2Mock.EXPECT(), stsMock.EXPECT(), awsNodeMock.EXPECT(), kubeProxyMock.EXPECT(), iamAuthenticatorMock.EXPECT()) g.Expect(testEnv.Create(ctx, &cluster)).To(Succeed()) - cluster.Status.InfrastructureReady = true + cluster.Status.Initialization.InfrastructureProvisioned = ptr.To(true) + g.Expect(testEnv.Client.Status().Update(ctx, &cluster)).To(Succeed()) g.Expect(testEnv.Create(ctx, &awsManagedCluster)).To(Succeed()) g.Expect(testEnv.Create(ctx, &awsManagedControlPlane)).To(Succeed()) @@ 
-158,11 +160,13 @@ func TestAWSManagedControlPlaneReconcilerIntegrationTests(t *testing.T) { // patch the paused condition awsManagedControlPlanePatcher, err := patch.NewHelper(&awsManagedControlPlane, testEnv) - awsManagedControlPlane.Status.Conditions = clusterv1.Conditions{ + awsManagedControlPlane.Status.Conditions = clusterv1beta1.Conditions{ { - Type: "Paused", - Status: corev1.ConditionFalse, - Reason: "NotPaused", + Type: "Paused", + Status: corev1.ConditionFalse, + Reason: "NotPaused", + Message: "", + LastTransitionTime: metav1.NewTime(time.Now()), }, } @@ -465,7 +469,8 @@ func mockedCallsForMissingEverything(ec2Rec *mocks.MockEC2APIMockRecorder, subne Name: aws.String("tag-key"), Values: []string{"sigs.k8s.io/cluster-api-provider-aws/cluster/test-cluster"}, }, - }})).Return(&ec2.DescribeRouteTablesOutput{ + }, + })).Return(&ec2.DescribeRouteTablesOutput{ RouteTables: []ec2types.RouteTable{ { Routes: []ec2types.Route{ @@ -525,7 +530,8 @@ func mockedCallsForMissingEverything(ec2Rec *mocks.MockEC2APIMockRecorder, subne Name: aws.String("state"), Values: []string{string(ec2types.VpcStatePending), string(ec2types.VpcStateAvailable)}, }, - }}), gomock.Any()).Return(&ec2.DescribeNatGatewaysOutput{}, nil).MinTimes(1).MaxTimes(2) + }, + }), gomock.Any()).Return(&ec2.DescribeNatGatewaysOutput{}, nil).MinTimes(1).MaxTimes(2) ec2Rec.DescribeAddresses(context.TODO(), gomock.Eq(&ec2.DescribeAddressesInput{ Filters: []ec2types.Filter{ diff --git a/controlplane/eks/controllers/helpers_test.go b/controlplane/eks/controllers/helpers_test.go index e79c2265b0..dab4f69001 100644 --- a/controlplane/eks/controllers/helpers_test.go +++ b/controlplane/eks/controllers/helpers_test.go @@ -16,7 +16,6 @@ limitations under the License. package controllers import ( - corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" utilruntime "k8s.io/apimachinery/pkg/util/runtime" @@ -25,7 +24,7 @@ import ( infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" ) func getAWSManagedControlPlaneScope(cluster *clusterv1.Cluster, awsManagedControlPlane *ekscontrolplanev1.AWSManagedControlPlane) *scope.ManagedControlPlaneScope { @@ -50,17 +49,15 @@ func getManagedClusterObjects(name, namespace string) (clusterv1.Cluster, infrav UID: "1", }, Spec: clusterv1.ClusterSpec{ - ControlPlaneRef: &corev1.ObjectReference{ - APIVersion: ekscontrolplanev1.GroupVersion.String(), - Name: name, - Kind: "AWSManagedControlPlane", - Namespace: namespace, + ControlPlaneRef: clusterv1.ContractVersionedObjectReference{ + APIGroup: ekscontrolplanev1.GroupVersion.Group, + Name: name, + Kind: "AWSManagedControlPlane", }, - InfrastructureRef: &corev1.ObjectReference{ - APIVersion: infrav1.GroupVersion.String(), - Name: name, - Kind: "AWSManagedCluster", - Namespace: namespace, + InfrastructureRef: clusterv1.ContractVersionedObjectReference{ + APIGroup: infrav1.GroupVersion.Group, + Name: name, + Kind: "AWSManagedCluster", }, }, } diff --git a/controlplane/eks/controllers/suite_test.go b/controlplane/eks/controllers/suite_test.go index c284f3dec2..360f4fbf2c 100644 --- a/controlplane/eks/controllers/suite_test.go +++ b/controlplane/eks/controllers/suite_test.go @@ -29,7 +29,7 @@ import ( infrav1 
"sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/test/helpers" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" ) var ( diff --git a/controlplane/rosa/api/v1beta2/conditions_consts.go b/controlplane/rosa/api/v1beta2/conditions_consts.go index 8bb0f50427..4229bedb81 100644 --- a/controlplane/rosa/api/v1beta2/conditions_consts.go +++ b/controlplane/rosa/api/v1beta2/conditions_consts.go @@ -16,20 +16,23 @@ limitations under the License. package v1beta2 -import clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" +import clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" const ( // ROSAControlPlaneReadyCondition condition reports on the successful reconciliation of ROSAControlPlane. - ROSAControlPlaneReadyCondition clusterv1.ConditionType = "ROSAControlPlaneReady" + ROSAControlPlaneReadyCondition clusterv1beta1.ConditionType = "ROSAControlPlaneReady" // ROSAControlPlaneValidCondition condition reports whether ROSAControlPlane configuration is valid. - ROSAControlPlaneValidCondition clusterv1.ConditionType = "ROSAControlPlaneValid" + ROSAControlPlaneValidCondition clusterv1beta1.ConditionType = "ROSAControlPlaneValid" // ROSAControlPlaneUpgradingCondition condition reports whether ROSAControlPlane is upgrading or not. - ROSAControlPlaneUpgradingCondition clusterv1.ConditionType = "ROSAControlPlaneUpgrading" + ROSAControlPlaneUpgradingCondition clusterv1beta1.ConditionType = "ROSAControlPlaneUpgrading" // ExternalAuthConfiguredCondition condition reports whether external auth has beed correctly configured. - ExternalAuthConfiguredCondition clusterv1.ConditionType = "ExternalAuthConfigured" + ExternalAuthConfiguredCondition clusterv1beta1.ConditionType = "ExternalAuthConfigured" + + // ROSARoleConfigReadyCondition condition reports whether the referenced RosaRoleConfig is ready. + ROSARoleConfigReadyCondition clusterv1beta1.ConditionType = "ROSARoleConfigReady" // ReconciliationFailedReason used to report reconciliation failures. ReconciliationFailedReason = "ReconciliationFailed" @@ -39,4 +42,10 @@ const ( // ROSAControlPlaneInvalidConfigurationReason used to report invalid user input. ROSAControlPlaneInvalidConfigurationReason = "InvalidConfiguration" + + // ROSARoleConfigNotReadyReason used to report when referenced RosaRoleConfig is not ready. + ROSARoleConfigNotReadyReason = "ROSARoleConfigNotReady" + + // ROSARoleConfigNotFoundReason used to report when referenced RosaRoleConfig is not found. + ROSARoleConfigNotFoundReason = "ROSARoleConfigNotFound" ) diff --git a/controlplane/rosa/api/v1beta2/rosacontrolplane_types.go b/controlplane/rosa/api/v1beta2/rosacontrolplane_types.go index c8a99cea6a..4be22a8dec 100644 --- a/controlplane/rosa/api/v1beta2/rosacontrolplane_types.go +++ b/controlplane/rosa/api/v1beta2/rosacontrolplane_types.go @@ -21,8 +21,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" - expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) // RosaEndpointAccessType specifies the publishing scope of cluster endpoints. @@ -72,6 +71,17 @@ const ( Nightly ChannelGroupType = "nightly" ) +// AutoNodeMode specifies the AutoNode mode for the ROSA Control Plane. 
+type AutoNodeMode string + +const ( + // AutoNodeModeEnabled enables AutoNode. + AutoNodeModeEnabled AutoNodeMode = "Enabled" + + // AutoNodeModeDisabled disables AutoNode. + AutoNodeModeDisabled AutoNodeMode = "Disabled" +) + // RosaControlPlaneSpec defines the desired state of ROSAControlPlane. type RosaControlPlaneSpec struct { //nolint: maligned // Cluster name must be a valid DNS-1035 label, so it must consist of lower case alphanumeric @@ -98,12 +108,14 @@ type RosaControlPlaneSpec struct { //nolint: maligned // The Subnet IDs to use when installing the cluster. // SubnetIDs should come in pairs; two per availability zone, one private and one public. - Subnets []string `json:"subnets"` + // +optional + Subnets []string `json:"subnets,omitempty"` // AvailabilityZones describe AWS AvailabilityZones of the worker nodes. // should match the AvailabilityZones of the provided Subnets. // a machinepool will be created for each availabilityZone. - AvailabilityZones []string `json:"availabilityZones"` + // +optional + AvailabilityZones []string `json:"availabilityZones,omitempty"` // The AWS Region the cluster lives in. Region string `json:"region"` @@ -127,13 +139,23 @@ type RosaControlPlaneSpec struct { //nolint: maligned // +kubebuilder:default=WaitForAcknowledge VersionGate VersionGateAckType `json:"versionGate"` + // RosaRoleConfigRef is a reference to a RosaRoleConfig resource that contains account roles, operator roles and OIDC configuration. + // RosaRoleConfigRef and role fields such as installerRoleARN, supportRoleARN, workerRoleARN, rolesRef and oidcID are mutually exclusive. + // + // +optional + RosaRoleConfigRef *corev1.LocalObjectReference `json:"rosaRoleConfigRef,omitempty"` + // AWS IAM roles used to perform credential requests by the OpenShift operators. - RolesRef AWSRolesRef `json:"rolesRef"` + // Required if RosaRoleConfigRef is not specified. + // +optional + RolesRef AWSRolesRef `json:"rolesRef,omitempty"` // The ID of the internal OpenID Connect Provider. + // Required if RosaRoleConfigRef is not specified. // // +kubebuilder:validation:XValidation:rule="self == oldSelf", message="oidcID is immutable" - OIDCID string `json:"oidcID"` + // +optional + OIDCID string `json:"oidcID,omitempty"` // EnableExternalAuthProviders enables external authentication configuration for the cluster. // @@ -152,13 +174,19 @@ // +kubebuilder:validation:MaxItems=1 ExternalAuthProviders []ExternalAuthProvider `json:"externalAuthProviders,omitempty"` - // InstallerRoleARN is an AWS IAM role that OpenShift Cluster Manager will assume to create the cluster. - InstallerRoleARN string `json:"installerRoleARN"` + // InstallerRoleARN is an AWS IAM role that OpenShift Cluster Manager will assume to create the cluster. + // Required if RosaRoleConfigRef is not specified. + // +optional + InstallerRoleARN string `json:"installerRoleARN,omitempty"` // SupportRoleARN is an AWS IAM role used by Red Hat SREs to enable // access to the cluster account in order to provide support. - SupportRoleARN string `json:"supportRoleARN"` + // Required if RosaRoleConfigRef is not specified. + // +optional + SupportRoleARN string `json:"supportRoleARN,omitempty"` // WorkerRoleARN is an AWS IAM role that will be attached to worker instances. - WorkerRoleARN string `json:"workerRoleARN"` + // Required if RosaRoleConfigRef is not specified. 
+ // +optional + WorkerRoleARN string `json:"workerRoleARN,omitempty"` // BillingAccount is an optional AWS account to use for billing the subscription fees for ROSA HCP clusters. // The cost of running each ROSA HCP cluster will be billed to the infrastructure account in which the cluster @@ -229,11 +257,35 @@ type RosaControlPlaneSpec struct { //nolint: maligned // ControlPlaneEndpoint represents the endpoint used to communicate with the control plane. // +optional - ControlPlaneEndpoint clusterv1.APIEndpoint `json:"controlPlaneEndpoint"` + ControlPlaneEndpoint clusterv1beta1.APIEndpoint `json:"controlPlaneEndpoint"` // ClusterRegistryConfig represents registry config used with the cluster. // +optional ClusterRegistryConfig *RegistryConfig `json:"clusterRegistryConfig,omitempty"` + + // autoNode sets the autoNode mode and roleARN. + // +optional + AutoNode *AutoNode `json:"autoNode,omitempty"` + + // ROSANetworkRef references the ROSANetwork custom resource that contains the networking infrastructure + // for the ROSA HCP cluster. + // +optional + ROSANetworkRef *corev1.LocalObjectReference `json:"rosaNetworkRef,omitempty"` +} + +// AutoNode sets the AutoNode mode and AutoNode role ARN. +type AutoNode struct { + // mode specifies the mode for AutoNode. Setting the mode to Enabled/Disabled allows/disallows Karpenter AutoNode scaling. + // +kubebuilder:validation:Enum=Enabled;Disabled + // +kubebuilder:default=Disabled + // +optional + Mode AutoNodeMode `json:"mode,omitempty"` + + // roleARN sets the autoNode role ARN, which includes the IAM policy and cluster-specific role that grant the necessary permissions to the Karpenter controller. + // The role must be associated with the same OIDC ID that is used with the ROSA-HCP cluster. + // +kubebuilder:validation:MaxLength:=2048 + // +optional + RoleARN string `json:"roleARN,omitempty"` } // RegistryConfig for ROSA-HCP cluster @@ -333,7 +385,7 @@ type DefaultMachinePoolSpec struct { // Autoscaling specifies auto scaling behaviour for the default MachinePool. Autoscaling min/max value // must be equal to or a multiple of the availability zone count. // +optional - Autoscaling *expinfrav1.RosaMachinePoolAutoScaling `json:"autoscaling,omitempty"` + Autoscaling *AutoScaling `json:"autoscaling,omitempty"` // VolumeSize sets the disk volume size for the default worker machine pool in GiB. The default is 300 GiB. // +kubebuilder:validation:Minimum=75 @@ -343,6 +395,14 @@ VolumeSize int `json:"volumeSize,omitempty"` } +// AutoScaling specifies scaling options. +type AutoScaling struct { + // +kubebuilder:validation:Minimum=1 + MinReplicas int `json:"minReplicas,omitempty"` + // +kubebuilder:validation:Minimum=1 + MaxReplicas int `json:"maxReplicas,omitempty"` +} + // AWSRolesRef contains references to various AWS IAM roles required for operators to make calls against the AWS API. type AWSRolesRef struct { // The referenced role must have a trust relationship that allows it to be assumed via web identity. @@ -746,7 +806,7 @@ type RosaControlPlaneStatus struct { // +optional FailureMessage *string `json:"failureMessage,omitempty"` // Conditions specifies the conditions for the managed control plane - Conditions clusterv1.Conditions `json:"conditions,omitempty"` + Conditions clusterv1beta1.Conditions `json:"conditions,omitempty"` // ID is the cluster ID given by ROSA. ID string `json:"id,omitempty"` @@ -790,12 +850,12 @@ type ROSAControlPlaneList struct { } // GetConditions returns the control plane's conditions. 
-func (r *ROSAControlPlane) GetConditions() clusterv1.Conditions { +func (r *ROSAControlPlane) GetConditions() clusterv1beta1.Conditions { return r.Status.Conditions } // SetConditions sets the status conditions for the ROSAControlPlane. -func (r *ROSAControlPlane) SetConditions(conditions clusterv1.Conditions) { +func (r *ROSAControlPlane) SetConditions(conditions clusterv1beta1.Conditions) { r.Status.Conditions = conditions } diff --git a/controlplane/rosa/api/v1beta2/rosacontrolplane_webhook.go b/controlplane/rosa/api/v1beta2/rosacontrolplane_webhook.go index 56071a878e..21618bcec3 100644 --- a/controlplane/rosa/api/v1beta2/rosacontrolplane_webhook.go +++ b/controlplane/rosa/api/v1beta2/rosacontrolplane_webhook.go @@ -1,3 +1,19 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + package v1beta2 import ( @@ -58,9 +74,17 @@ func (*rosaControlPlaneWebhook) ValidateCreate(_ context.Context, obj runtime.Ob allErrs = append(allErrs, err) } + if err := r.validateRosaRoleConfig(); err != nil { + allErrs = append(allErrs, err) + } + allErrs = append(allErrs, r.validateNetwork()...) allErrs = append(allErrs, r.Spec.AdditionalTags.Validate()...) + if err := r.validateROSANetwork(); err != nil { + allErrs = append(allErrs, err) + } + if len(allErrs) == 0 { return nil, nil } @@ -101,6 +125,10 @@ func (*rosaControlPlaneWebhook) ValidateUpdate(_ context.Context, oldObj, newObj allErrs = append(allErrs, err) } + if err := r.validateRosaRoleConfig(); err != nil { + allErrs = append(allErrs, err) + } + allErrs = append(allErrs, r.validateNetwork()...) allErrs = append(allErrs, r.Spec.AdditionalTags.Validate()...)
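The hunk that follows adds validateRosaRoleConfig, which enforces the contract documented on the new spec fields: either spec.rosaRoleConfigRef delegates the IAM and OIDC wiring to a RosaRoleConfig resource, or the OIDC ID, the three account-role ARNs, and all eight operator-role ARNs in rolesRef must be set inline. A minimal sketch of the two spec shapes the webhook accepts; the resource name, OIDC ID, and ARNs below are illustrative placeholders, not values from this patch:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	rosacontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/rosa/api/v1beta2"
)

func main() {
	// Shape 1: delegate roles and OIDC config to a RosaRoleConfig resource.
	// None of the inline ARN fields or oidcID may be set alongside the ref.
	delegated := rosacontrolplanev1.RosaControlPlaneSpec{
		RosaRoleConfigRef: &corev1.LocalObjectReference{Name: "my-role-config"}, // placeholder name
	}

	// Shape 2: no ref, so oidcID, the account-role ARNs, and every operator
	// role ARN in rolesRef must be populated. All values are placeholders.
	inline := rosacontrolplanev1.RosaControlPlaneSpec{
		OIDCID:           "example-oidc-id",
		InstallerRoleARN: "arn:aws:iam::111122223333:role/Installer",
		SupportRoleARN:   "arn:aws:iam::111122223333:role/Support",
		WorkerRoleARN:    "arn:aws:iam::111122223333:role/Worker",
		RolesRef: rosacontrolplanev1.AWSRolesRef{
			IngressARN:              "arn:aws:iam::111122223333:role/Ingress",
			ImageRegistryARN:        "arn:aws:iam::111122223333:role/ImageRegistry",
			StorageARN:              "arn:aws:iam::111122223333:role/Storage",
			NetworkARN:              "arn:aws:iam::111122223333:role/Network",
			KubeCloudControllerARN:  "arn:aws:iam::111122223333:role/KubeCloudController",
			NodePoolManagementARN:   "arn:aws:iam::111122223333:role/NodePoolManagement",
			ControlPlaneOperatorARN: "arn:aws:iam::111122223333:role/ControlPlaneOperator",
			KMSProviderARN:          "arn:aws:iam::111122223333:role/KMSProvider",
		},
	}

	fmt.Println(delegated.RosaRoleConfigRef != nil, inline.OIDCID)
}

Populating both shapes at once is rejected as mutually exclusive; populating neither returns a "must be specified" error for the first missing field.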
@@ -179,6 +207,79 @@ func (r *ROSAControlPlane) validateExternalAuthProviders() *field.Error { return nil } +func (r *ROSAControlPlane) validateRosaRoleConfig() *field.Error { + hasRoleFields := r.Spec.OIDCID != "" || r.Spec.InstallerRoleARN != "" || r.Spec.SupportRoleARN != "" || r.Spec.WorkerRoleARN != "" || + r.Spec.RolesRef.IngressARN != "" || r.Spec.RolesRef.ImageRegistryARN != "" || r.Spec.RolesRef.StorageARN != "" || + r.Spec.RolesRef.NetworkARN != "" || r.Spec.RolesRef.KubeCloudControllerARN != "" || r.Spec.RolesRef.NodePoolManagementARN != "" || + r.Spec.RolesRef.ControlPlaneOperatorARN != "" || r.Spec.RolesRef.KMSProviderARN != "" + + if r.Spec.RosaRoleConfigRef != nil { + if hasRoleFields { + return field.Invalid(field.NewPath("spec.rosaRoleConfigRef"), r.Spec.RosaRoleConfigRef, "RosaRoleConfigRef and role fields such as installerRoleARN, supportRoleARN, workerRoleARN, rolesRef and oidcID are mutually exclusive") + } + return nil + } + + if r.Spec.OIDCID == "" { + return field.Invalid(field.NewPath("spec.oidcID"), r.Spec.OIDCID, "must be specified") + } + if r.Spec.InstallerRoleARN == "" { + return field.Invalid(field.NewPath("spec.installerRoleARN"), r.Spec.InstallerRoleARN, "must be specified") + } + if r.Spec.SupportRoleARN == "" { + return field.Invalid(field.NewPath("spec.supportRoleARN"), r.Spec.SupportRoleARN, "must be specified") + } + if r.Spec.WorkerRoleARN == "" { + return field.Invalid(field.NewPath("spec.workerRoleARN"), r.Spec.WorkerRoleARN, "must be specified") + } + if r.Spec.RolesRef.IngressARN == "" { + return field.Invalid(field.NewPath("spec.rolesRef.ingressARN"), r.Spec.RolesRef.IngressARN, "must be specified") + } + if r.Spec.RolesRef.ImageRegistryARN == "" { + return field.Invalid(field.NewPath("spec.rolesRef.imageRegistryARN"), r.Spec.RolesRef.ImageRegistryARN, "must be specified") + } + if r.Spec.RolesRef.StorageARN == "" { + return field.Invalid(field.NewPath("spec.rolesRef.storageARN"), r.Spec.RolesRef.StorageARN, "must be specified") + } + if r.Spec.RolesRef.NetworkARN == "" { + return field.Invalid(field.NewPath("spec.rolesRef.networkARN"), r.Spec.RolesRef.NetworkARN, "must be specified") + } + if r.Spec.RolesRef.KubeCloudControllerARN == "" { + return field.Invalid(field.NewPath("spec.rolesRef.kubeCloudControllerARN"), r.Spec.RolesRef.KubeCloudControllerARN, "must be specified") + } + if r.Spec.RolesRef.NodePoolManagementARN == "" { + return field.Invalid(field.NewPath("spec.rolesRef.nodePoolManagementARN"), r.Spec.RolesRef.NodePoolManagementARN, "must be specified") + } + if r.Spec.RolesRef.ControlPlaneOperatorARN == "" { + return field.Invalid(field.NewPath("spec.rolesRef.controlPlaneOperatorARN"), r.Spec.RolesRef.ControlPlaneOperatorARN, "must be specified") + } + if r.Spec.RolesRef.KMSProviderARN == "" { + return field.Invalid(field.NewPath("spec.rolesRef.kmsProviderARN"), r.Spec.RolesRef.KMSProviderARN, "must be specified") + } + return nil +} + +func (r *ROSAControlPlane) validateROSANetwork() *field.Error { + if r.Spec.ROSANetworkRef != nil { + if r.Spec.Subnets != nil { + return field.Forbidden(field.NewPath("spec.rosaNetworkRef"), "spec.subnets and spec.rosaNetworkRef are mutually exclusive") + } + if r.Spec.AvailabilityZones != nil { + return field.Forbidden(field.NewPath("spec.rosaNetworkRef"), "spec.availabilityZones and spec.rosaNetworkRef are mutually exclusive") + } + } + + if r.Spec.ROSANetworkRef == nil && r.Spec.Subnets == nil { + return field.Required(field.NewPath("spec.subnets"), "spec.subnets cannot be empty when 
spec.rosaNetworkRef is unspecified") + } + + if r.Spec.ROSANetworkRef == nil && r.Spec.AvailabilityZones == nil { + return field.Required(field.NewPath("spec.availabilityZones"), "spec.availabilityZones cannot be empty when spec.rosaNetworkRef is unspecified") + } + + return nil +} + // Default implements admission.Defaulter. func (*rosaControlPlaneWebhook) Default(_ context.Context, obj runtime.Object) error { r, ok := obj.(*ROSAControlPlane) diff --git a/controlplane/rosa/api/v1beta2/rosacontrolplane_webhook_test.go b/controlplane/rosa/api/v1beta2/rosacontrolplane_webhook_test.go new file mode 100644 index 0000000000..2ec2a5f115 --- /dev/null +++ b/controlplane/rosa/api/v1beta2/rosacontrolplane_webhook_test.go @@ -0,0 +1,81 @@ +/* +Copyright 2025 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta2 + +import ( + "testing" + + . "github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" +) + +func TestValidateROSANetwork(t *testing.T) { + g := NewGomegaWithT(t) + + rosaCP := &ROSAControlPlane{ + Spec: RosaControlPlaneSpec{}, + Status: RosaControlPlaneStatus{}, + } + + t.Run("Validation error when no ROSANetworkRef, no subnets, no AZs", func(t *testing.T) { + err := rosaCP.validateROSANetwork() + g.Expect(err).To(HaveOccurred()) + g.Expect(err.Error()).To(ContainSubstring("spec.subnets cannot be empty")) + }) + + t.Run("Validation error when no ROSANetworkRef, subnets present, no AZs", func(t *testing.T) { + rosaCP.Spec.Subnets = []string{"subnet01", "subnet02"} + err := rosaCP.validateROSANetwork() + g.Expect(err).To(HaveOccurred()) + g.Expect(err.Error()).To(ContainSubstring("spec.availabilityZones cannot be empty")) + }) + + t.Run("Validation succeeds when no ROSANetworkRef, subnets and AZs are present", func(t *testing.T) { + rosaCP.Spec.AvailabilityZones = []string{"AZ01", "AZ02"} + err := rosaCP.validateROSANetwork() + g.Expect(err).NotTo(HaveOccurred()) + }) + + t.Run("Validation error when ROSANetworkRef, subnets and AZs are present", func(t *testing.T) { + rosaCP.Spec.ROSANetworkRef = &corev1.LocalObjectReference{} + err := rosaCP.validateROSANetwork() + g.Expect(err).To(HaveOccurred()) + g.Expect(err.Error()).To(ContainSubstring("spec.subnets and spec.rosaNetworkRef are mutually exclusive")) + }) + + t.Run("Validation error when ROSANetworkRef and subnets are present, no AZs", func(t *testing.T) { + rosaCP.Spec.AvailabilityZones = nil + err := rosaCP.validateROSANetwork() + g.Expect(err).To(HaveOccurred()) + g.Expect(err.Error()).To(ContainSubstring("spec.subnets and spec.rosaNetworkRef are mutually exclusive")) + }) + + t.Run("Validation error when ROSANetworkRef and AZs are present, no subnets", func(t *testing.T) { + rosaCP.Spec.AvailabilityZones = []string{"AZ01", "AZ02"} + rosaCP.Spec.Subnets = nil + err := rosaCP.validateROSANetwork() + g.Expect(err).To(HaveOccurred()) + g.Expect(err.Error()).To(ContainSubstring("spec.availabilityZones and spec.rosaNetworkRef are mutually exclusive")) + }) + + t.Run("Validation succeeds when ROSANetworkRef is present, no 
subnets and no AZs", func(t *testing.T) { + rosaCP.Spec.AvailabilityZones = nil + rosaCP.Spec.Subnets = nil + err := rosaCP.validateROSANetwork() + g.Expect(err).NotTo(HaveOccurred()) + }) +} diff --git a/controlplane/rosa/api/v1beta2/zz_generated.deepcopy.go b/controlplane/rosa/api/v1beta2/zz_generated.deepcopy.go index 3e4dfdf8cf..8c6e718edb 100644 --- a/controlplane/rosa/api/v1beta2/zz_generated.deepcopy.go +++ b/controlplane/rosa/api/v1beta2/zz_generated.deepcopy.go @@ -24,8 +24,7 @@ import ( "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" apiv1beta2 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" - expapiv1beta2 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2" - "sigs.k8s.io/cluster-api/api/v1beta1" + "sigs.k8s.io/cluster-api/api/core/v1beta1" ) // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. @@ -43,12 +42,42 @@ func (in *AWSRolesRef) DeepCopy() *AWSRolesRef { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AutoNode) DeepCopyInto(out *AutoNode) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoNode. +func (in *AutoNode) DeepCopy() *AutoNode { + if in == nil { + return nil + } + out := new(AutoNode) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AutoScaling) DeepCopyInto(out *AutoScaling) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoScaling. +func (in *AutoScaling) DeepCopy() *AutoScaling { + if in == nil { + return nil + } + out := new(AutoScaling) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *DefaultMachinePoolSpec) DeepCopyInto(out *DefaultMachinePoolSpec) { *out = *in if in.Autoscaling != nil { in, out := &in.Autoscaling, &out.Autoscaling - *out = new(expapiv1beta2.RosaMachinePoolAutoScaling) + *out = new(AutoScaling) **out = **in } } @@ -311,6 +340,11 @@ func (in *RosaControlPlaneSpec) DeepCopyInto(out *RosaControlPlaneSpec) { *out = make([]string, len(*in)) copy(*out, *in) } + if in.RosaRoleConfigRef != nil { + in, out := &in.RosaRoleConfigRef, &out.RosaRoleConfigRef + *out = new(v1.LocalObjectReference) + **out = **in + } out.RolesRef = in.RolesRef if in.ExternalAuthProviders != nil { in, out := &in.ExternalAuthProviders, &out.ExternalAuthProviders @@ -348,6 +382,16 @@ func (in *RosaControlPlaneSpec) DeepCopyInto(out *RosaControlPlaneSpec) { *out = new(RegistryConfig) (*in).DeepCopyInto(*out) } + if in.AutoNode != nil { + in, out := &in.AutoNode, &out.AutoNode + *out = new(AutoNode) + **out = **in + } + if in.ROSANetworkRef != nil { + in, out := &in.ROSANetworkRef, &out.ROSANetworkRef + *out = new(v1.LocalObjectReference) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RosaControlPlaneSpec. 
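The controller diff below consumes the other new pairing: spec.rosaNetworkRef is mutually exclusive with spec.subnets and spec.availabilityZones (enforced by validateROSANetwork above), and when the ref is set, reconcileNormal waits for the referenced ROSANetwork to become Ready and then derives subnet IDs and availability zones from its status. A rough in-package sketch of the two accepted networking shapes, with placeholder names and IDs:

package v1beta2

import corev1 "k8s.io/api/core/v1"

// explicitNetworking pins subnets and availability zones directly on the
// spec; rosaNetworkRef must remain nil in this shape.
func explicitNetworking() RosaControlPlaneSpec {
	return RosaControlPlaneSpec{
		Subnets:           []string{"subnet-private-1a", "subnet-public-1a"}, // placeholder IDs
		AvailabilityZones: []string{"us-east-1a"},
	}
}

// managedNetworking delegates networking to a ROSANetwork resource; the
// reconciler later fills subnets and zones from rosaNet.Status.Subnets.
func managedNetworking() RosaControlPlaneSpec {
	return RosaControlPlaneSpec{
		ROSANetworkRef: &corev1.LocalObjectReference{Name: "my-rosa-network"}, // placeholder name
	}
}

Mixing the two shapes fails validation, and leaving both out fails with "spec.subnets cannot be empty when spec.rosaNetworkRef is unspecified".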
diff --git a/controlplane/rosa/controllers/rosacontrolplane_controller.go b/controlplane/rosa/controllers/rosacontrolplane_controller.go index 90426d2aab..a87b7a9a1d 100644 --- a/controlplane/rosa/controllers/rosacontrolplane_controller.go +++ b/controlplane/rosa/controllers/rosacontrolplane_controller.go @@ -65,9 +65,10 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/rosa" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/utils" "sigs.k8s.io/cluster-api-provider-aws/v2/util/paused" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/cluster-api/util" - "sigs.k8s.io/cluster-api/util/conditions" + v1beta1conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" "sigs.k8s.io/cluster-api/util/kubeconfig" "sigs.k8s.io/cluster-api/util/predicates" "sigs.k8s.io/cluster-api/util/secret" @@ -108,7 +109,6 @@ func (r *ROSAControlPlaneReconciler) SetupWithManager(ctx context.Context, mgr c WithOptions(options). WithEventFilter(predicates.ResourceHasFilterLabel(mgr.GetScheme(), log.GetLogger(), r.WatchFilterValue)). Build(r) - if err != nil { return fmt.Errorf("failed setting up the ROSAControlPlane controller manager: %w", err) } @@ -116,7 +116,7 @@ func (r *ROSAControlPlaneReconciler) SetupWithManager(ctx context.Context, mgr c if err = c.Watch( source.Kind[client.Object](mgr.GetCache(), &clusterv1.Cluster{}, handler.EnqueueRequestsFromMapFunc(util.ClusterToInfrastructureMapFunc(ctx, rosaControlPlane.GroupVersionKind(), mgr.GetClient(), &expinfrav1.ROSACluster{})), - predicates.ClusterPausedTransitionsOrInfrastructureReady(mgr.GetScheme(), log.GetLogger())), + predicates.ClusterPausedTransitionsOrInfrastructureProvisioned(mgr.GetScheme(), log.GetLogger())), ); err != nil { return fmt.Errorf("failed adding a watch for ready clusters: %w", err) } @@ -141,6 +141,8 @@ func (r *ROSAControlPlaneReconciler) SetupWithManager(ctx context.Context, mgr c // +kubebuilder:rbac:groups=controlplane.cluster.x-k8s.io,resources=rosacontrolplanes,verbs=get;list;watch;update;patch;delete // +kubebuilder:rbac:groups=controlplane.cluster.x-k8s.io,resources=rosacontrolplanes/status,verbs=get;update;patch // +kubebuilder:rbac:groups=controlplane.cluster.x-k8s.io,resources=rosacontrolplanes/finalizers,verbs=update +// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=rosaroleconfigs,verbs=get;list;watch; +// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=rosaroleconfigs/status,verbs=get; // Reconcile will reconcile RosaControlPlane Resources. 
func (r *ROSAControlPlaneReconciler) Reconcile(ctx context.Context, req ctrl.Request) (res ctrl.Result, reterr error) { @@ -167,7 +169,6 @@ func (r *ROSAControlPlaneReconciler) Reconcile(ctx context.Context, req ctrl.Req } log = log.WithValues("cluster", klog.KObj(cluster)) - if isPaused, conditionChanged, err := paused.EnsurePausedCondition(ctx, r.Client, cluster, rosaControlPlane); err != nil || isPaused || conditionChanged { return ctrl.Result{}, err } @@ -227,17 +228,23 @@ func (r *ROSAControlPlaneReconciler) reconcileNormal(ctx context.Context, rosaSc return ctrl.Result{}, fmt.Errorf("failed to transform caller identity to creator: %w", err) } - validationMessage, err := validateControlPlaneSpec(ocmClient, rosaScope) + rosaRoleConfig, err := r.reconcileRosaRoleConfig(ctx, rosaScope) + if err != nil { + rosaScope.Error(err, "cannot reconcile RosaRoleConfig") + return ctrl.Result{}, err + } + + validationMessage, err := validateControlPlaneSpec(ocmClient, rosaScope.ControlPlane) if err != nil { return ctrl.Result{}, fmt.Errorf("failed to validate ROSAControlPlane.spec: %w", err) } - conditions.MarkTrue(rosaScope.ControlPlane, rosacontrolplanev1.ROSAControlPlaneValidCondition) + v1beta1conditions.MarkTrue(rosaScope.ControlPlane, rosacontrolplanev1.ROSAControlPlaneValidCondition) if validationMessage != "" { - conditions.MarkFalse(rosaScope.ControlPlane, + v1beta1conditions.MarkFalse(rosaScope.ControlPlane, rosacontrolplanev1.ROSAControlPlaneValidCondition, rosacontrolplanev1.ROSAControlPlaneInvalidConfigurationReason, - clusterv1.ConditionSeverityError, + clusterv1beta1.ConditionSeverityError, "%s", validationMessage) // don't requeue because input is invalid and manual intervention is needed. @@ -259,7 +266,7 @@ func (r *ROSAControlPlaneReconciler) reconcileNormal(ctx context.Context, rosaSc switch cluster.Status().State() { case cmv1.ClusterStateReady: - conditions.MarkTrue(rosaScope.ControlPlane, rosacontrolplanev1.ROSAControlPlaneReadyCondition) + v1beta1conditions.MarkTrue(rosaScope.ControlPlane, rosacontrolplanev1.ROSAControlPlaneReadyCondition) rosaScope.ControlPlane.Status.Ready = true apiEndpoint, err := buildAPIEndpoint(cluster) @@ -292,20 +299,20 @@ func (r *ROSAControlPlaneReconciler) reconcileNormal(ctx context.Context, rosaSc errorMessage := cluster.Status().ProvisionErrorMessage() rosaScope.ControlPlane.Status.FailureMessage = &errorMessage - conditions.MarkFalse(rosaScope.ControlPlane, + v1beta1conditions.MarkFalse(rosaScope.ControlPlane, rosacontrolplanev1.ROSAControlPlaneReadyCondition, string(cluster.Status().State()), - clusterv1.ConditionSeverityError, + clusterv1beta1.ConditionSeverityError, "%s", cluster.Status().ProvisionErrorCode()) // Cluster is in an unrecoverable state, returning nil error so that the request doesn't get requeued. return ctrl.Result{}, nil } - conditions.MarkFalse(rosaScope.ControlPlane, + v1beta1conditions.MarkFalse(rosaScope.ControlPlane, rosacontrolplanev1.ROSAControlPlaneReadyCondition, string(cluster.Status().State()), - clusterv1.ConditionSeverityInfo, + clusterv1beta1.ConditionSeverityInfo, "%s", cluster.Status().Description()) @@ -314,17 +321,37 @@ func (r *ROSAControlPlaneReconciler) reconcileNormal(ctx context.Context, rosaSc return ctrl.Result{RequeueAfter: time.Second * 60}, nil } - ocmClusterSpec, err := buildOCMClusterSpec(rosaScope.ControlPlane.Spec, creator) + rosaNet := &expinfrav1.ROSANetwork{} + // Does the control plane reference ROSANetwork? 
+ if rosaScope.ControlPlane.Spec.ROSANetworkRef != nil { + objKey := client.ObjectKey{ + Name: rosaScope.ControlPlane.Spec.ROSANetworkRef.Name, + Namespace: rosaScope.ControlPlane.Namespace, + } + + err := rosaScope.Client.Get(ctx, objKey, rosaNet) + if err != nil { + return ctrl.Result{}, fmt.Errorf("failed to fetch ROSANetwork: %w", err) + } + + // Is the referenced ROSANetwork ready yet? + if !v1beta1conditions.IsTrue(rosaNet, expinfrav1.ROSANetworkReadyCondition) { + rosaScope.Info(fmt.Sprintf("referenced ROSANetwork %s is not ready", rosaNet.Name)) + return ctrl.Result{RequeueAfter: time.Minute}, nil + } + } + + ocmClusterSpec, err := buildOCMClusterSpec(rosaScope.ControlPlane.Spec, rosaRoleConfig, rosaNet, creator) if err != nil { return ctrl.Result{}, err } cluster, err = ocmClient.CreateCluster(ocmClusterSpec) if err != nil { - conditions.MarkFalse(rosaScope.ControlPlane, + v1beta1conditions.MarkFalse(rosaScope.ControlPlane, rosacontrolplanev1.ROSAControlPlaneReadyCondition, rosacontrolplanev1.ReconciliationFailedReason, - clusterv1.ConditionSeverityError, + clusterv1beta1.ConditionSeverityError, "%s", err.Error()) return ctrl.Result{}, fmt.Errorf("failed to create OCM cluster: %w", err) @@ -336,6 +363,48 @@ func (r *ROSAControlPlaneReconciler) reconcileNormal(ctx context.Context, rosaSc return ctrl.Result{}, nil } +func (r *ROSAControlPlaneReconciler) reconcileRosaRoleConfig(ctx context.Context, rosaScope *scope.ROSAControlPlaneScope) (*expinfrav1.ROSARoleConfig, error) { + rosaRoleConfig := &expinfrav1.ROSARoleConfig{} + // Get role configuration from either RosaRoleConfig or direct fields + if rosaScope.ControlPlane.Spec.RosaRoleConfigRef != nil { + // Get RosaRoleConfig + key := client.ObjectKey{ + Name: rosaScope.ControlPlane.Spec.RosaRoleConfigRef.Name, + Namespace: rosaScope.ControlPlane.Namespace, + } + + if err := r.Client.Get(ctx, key, rosaRoleConfig); err != nil { + v1beta1conditions.MarkFalse(rosaScope.ControlPlane, + rosacontrolplanev1.ROSARoleConfigReadyCondition, + rosacontrolplanev1.ROSARoleConfigNotFoundReason, + clusterv1beta1.ConditionSeverityError, + "Failed to get RosaRoleConfig %s/%s", rosaScope.ControlPlane.Namespace, rosaScope.ControlPlane.Spec.RosaRoleConfigRef.Name) + + return nil, err + } + + // Check if RosaRoleConfig is ready + if !v1beta1conditions.IsTrue(rosaRoleConfig, expinfrav1.RosaRoleConfigReadyCondition) { + v1beta1conditions.MarkFalse(rosaScope.ControlPlane, + rosacontrolplanev1.ROSARoleConfigReadyCondition, + rosacontrolplanev1.ROSARoleConfigNotReadyReason, + clusterv1beta1.ConditionSeverityWarning, + "RosaRoleConfig %s/%s is not ready", rosaScope.ControlPlane.Namespace, rosaScope.ControlPlane.Spec.RosaRoleConfigRef.Name) + + return nil, fmt.Errorf("RosaRoleConfig %s/%s is not ready", rosaScope.ControlPlane.Namespace, rosaScope.ControlPlane.Spec.RosaRoleConfigRef.Name) + } + v1beta1conditions.MarkTrue(rosaScope.ControlPlane, rosacontrolplanev1.ROSARoleConfigReadyCondition) + } else { + rosaRoleConfig.Status.OIDCID = rosaScope.ControlPlane.Spec.OIDCID + rosaRoleConfig.Status.AccountRolesRef.InstallerRoleARN = rosaScope.ControlPlane.Spec.InstallerRoleARN + rosaRoleConfig.Status.AccountRolesRef.SupportRoleARN = rosaScope.ControlPlane.Spec.SupportRoleARN + rosaRoleConfig.Status.AccountRolesRef.WorkerRoleARN = rosaScope.ControlPlane.Spec.WorkerRoleARN + rosaRoleConfig.Status.OperatorRolesRef = rosaScope.ControlPlane.Spec.RolesRef + } + + return rosaRoleConfig, nil +} + func (r *ROSAControlPlaneReconciler) reconcileDelete(ctx context.Context, 
rosaScope *scope.ROSAControlPlaneScope) (res ctrl.Result, reterr error) { rosaScope.Info("Reconciling ROSAControlPlane delete") @@ -382,10 +451,10 @@ func (r *ROSAControlPlaneReconciler) reconcileDelete(ctx context.Context, rosaSc if cluster.Status().State() != cmv1.ClusterStateUninstalling { if _, err := ocmClient.DeleteCluster(cluster.ID(), bestEffort, creator); err != nil { - conditions.MarkFalse(rosaScope.ControlPlane, + v1beta1conditions.MarkFalse(rosaScope.ControlPlane, rosacontrolplanev1.ROSAControlPlaneReadyCondition, rosacontrolplanev1.ROSAControlPlaneDeletionFailedReason, - clusterv1.ConditionSeverityError, + clusterv1beta1.ConditionSeverityError, "failed to delete ROSAControlPlane: %s; if the error can't be resolved, set '%s' annotation to force the deletion", err.Error(), ROSAControlPlaneForceDeleteAnnotation) @@ -393,10 +462,10 @@ func (r *ROSAControlPlaneReconciler) reconcileDelete(ctx context.Context, rosaSc } } - conditions.MarkFalse(rosaScope.ControlPlane, + v1beta1conditions.MarkFalse(rosaScope.ControlPlane, rosacontrolplanev1.ROSAControlPlaneReadyCondition, string(cluster.Status().State()), - clusterv1.ConditionSeverityInfo, + clusterv1beta1.ConditionSeverityInfo, "deleting") rosaScope.ControlPlane.Status.Ready = false rosaScope.Info("waiting for cluster to be deleted") @@ -412,6 +481,7 @@ func (r *ROSAControlPlaneReconciler) deleteMachinePools(ctx context.Context, ros } var errs []error + allMachinePoolDeleted := true for id, mp := range machinePools { if !mp.DeletionTimestamp.IsZero() { continue @@ -419,28 +489,28 @@ func (r *ROSAControlPlaneReconciler) deleteMachinePools(ctx context.Context, ros if err = rosaScope.Client.Delete(ctx, &machinePools[id]); err != nil { errs = append(errs, err) } - } - - // Workaround the case where last machinePool cannot be deleted without deleting the ROSA controlplane. - // In Cluster API (CAPI), machine pools (MPs) are normally deleted before the control plane is removed. - // However, in ROSA-HCP, deleting the final MP results in an error because the control plane cannot exist without at least 1 MP. - // To handle this, when only one MP remains, we ignore the deletion error and proceed with deleting the control plane. - // Also OCM cascade delete the MPs when deleting control plane, so we are safe to ignore last MP and delete the control plane. - if len(errs) == 0 && len(machinePools) == 1 { - return true, nil + allMachinePoolDeleted = false } if len(errs) > 0 { return false, kerrors.NewAggregate(errs) } - return len(machinePools) == 0, nil + // Workaround: Handle the dependency issue between MachinePools and the ROSA control plane. + // In Cluster API (CAPI), MachinePools (MPs) are typically deleted before the control plane is deprovisioned. + // However, in ROSA-HCP, a cluster cannot exist without MachinePools, which causes an error when attempting + // to delete them first — preventing the ROSAControlPlane from being removed. + // To resolve this, we initiate the deletion of the MachinePool CRs, wait for one reconcile cycle, + // and then proceed to delete the ROSA-HCP control plane. + // OCM will automatically cascade the deletion of NodePools before the control plane is deleted. 
+ + return allMachinePoolDeleted, nil } func (r *ROSAControlPlaneReconciler) reconcileClusterVersion(rosaScope *scope.ROSAControlPlaneScope, ocmClient rosa.OCMClient, cluster *cmv1.Cluster) error { version := rosaScope.ControlPlane.Spec.Version if version == rosa.RawVersionID(cluster.Version()) { - conditions.MarkFalse(rosaScope.ControlPlane, rosacontrolplanev1.ROSAControlPlaneUpgradingCondition, "upgraded", clusterv1.ConditionSeverityInfo, "") + v1beta1conditions.MarkFalse(rosaScope.ControlPlane, rosacontrolplanev1.ROSAControlPlaneUpgradingCondition, "upgraded", clusterv1beta1.ConditionSeverityInfo, "") if cluster.Version() != nil { rosaScope.ControlPlane.Status.AvailableUpgrades = cluster.Version().AvailableUpgrades() @@ -464,25 +534,25 @@ func (r *ROSAControlPlaneReconciler) reconcileClusterVersion(rosaScope *scope.RO ack := (rosaScope.ControlPlane.Spec.VersionGate == rosacontrolplanev1.Acknowledge || rosaScope.ControlPlane.Spec.VersionGate == rosacontrolplanev1.AlwaysAcknowledge) scheduledUpgrade, err = rosa.ScheduleControlPlaneUpgrade(ocmClient, cluster, version, time.Now(), ack) if err != nil { - condition := &clusterv1.Condition{ + condition := &clusterv1beta1.Condition{ Type: rosacontrolplanev1.ROSAControlPlaneUpgradingCondition, Status: corev1.ConditionFalse, Reason: "failed", Message: fmt.Sprintf("failed to schedule upgrade to version %s: %v", version, err), } - conditions.Set(rosaScope.ControlPlane, condition) + v1beta1conditions.Set(rosaScope.ControlPlane, condition) return err } } - condition := &clusterv1.Condition{ + condition := &clusterv1beta1.Condition{ Type: rosacontrolplanev1.ROSAControlPlaneUpgradingCondition, Status: corev1.ConditionTrue, Reason: string(scheduledUpgrade.State().Value()), Message: fmt.Sprintf("Upgrading to version %s", scheduledUpgrade.Version()), } - conditions.Set(rosaScope.ControlPlane, condition) + v1beta1conditions.Set(rosaScope.ControlPlane, condition) // if cluster is already upgrading to another version we need to wait until the current upgrade is finished, return an error to requeue and try later. if scheduledUpgrade.Version() != version { @@ -499,10 +569,10 @@ func (r *ROSAControlPlaneReconciler) updateOCMCluster(rosaScope *scope.ROSAContr // Update the cluster. 
rosaScope.Info("Updating cluster") if err := ocmClient.UpdateCluster(cluster.ID(), creator, ocmClusterSpec); err != nil { - conditions.MarkFalse(rosaScope.ControlPlane, + v1beta1conditions.MarkFalse(rosaScope.ControlPlane, rosacontrolplanev1.ROSAControlPlaneValidCondition, rosacontrolplanev1.ROSAControlPlaneInvalidConfigurationReason, - clusterv1.ConditionSeverityError, + clusterv1beta1.ConditionSeverityError, "%s", err.Error()) return err @@ -578,6 +648,18 @@ func (r *ROSAControlPlaneReconciler) updateOCMClusterSpec(rosaControlPlane *rosa updated = true } + if rosaControlPlane.Spec.AutoNode != nil { + if !strings.EqualFold(ocmClusterSpec.AutoNodeMode, string(rosaControlPlane.Spec.AutoNode.Mode)) { + ocmClusterSpec.AutoNodeMode = strings.ToLower(string(rosaControlPlane.Spec.AutoNode.Mode)) + updated = true + } + + if ocmClusterSpec.AutoNodeRoleARN != rosaControlPlane.Spec.AutoNode.RoleARN { + ocmClusterSpec.AutoNodeRoleARN = rosaControlPlane.Spec.AutoNode.RoleARN + updated = true + } + } + return ocmClusterSpec, updated } @@ -591,14 +673,14 @@ func (r *ROSAControlPlaneReconciler) reconcileExternalAuth(ctx context.Context, var errs []error if err := r.reconcileExternalAuthProviders(ctx, externalAuthClient, rosaScope, cluster); err != nil { errs = append(errs, err) - conditions.MarkFalse(rosaScope.ControlPlane, + v1beta1conditions.MarkFalse(rosaScope.ControlPlane, rosacontrolplanev1.ExternalAuthConfiguredCondition, rosacontrolplanev1.ReconciliationFailedReason, - clusterv1.ConditionSeverityError, + clusterv1beta1.ConditionSeverityError, "%s", err.Error()) } else { - conditions.MarkTrue(rosaScope.ControlPlane, rosacontrolplanev1.ExternalAuthConfiguredCondition) + v1beta1conditions.MarkTrue(rosaScope.ControlPlane, rosacontrolplanev1.ExternalAuthConfiguredCondition) } if err := r.reconcileExternalAuthBootstrapKubeconfig(ctx, externalAuthClient, rosaScope, cluster); err != nil { @@ -909,9 +991,9 @@ func (r *ROSAControlPlaneReconciler) reconcileClusterAdminPassword(ctx context.C return password, nil } -func validateControlPlaneSpec(ocmClient rosa.OCMClient, rosaScope *scope.ROSAControlPlaneScope) (string, error) { - version := rosaScope.ControlPlane.Spec.Version - channelGroup := string(rosaScope.ControlPlane.Spec.ChannelGroup) +func validateControlPlaneSpec(ocmClient rosa.OCMClient, rosaControlPlane *rosacontrolplanev1.ROSAControlPlane) (string, error) { + version := rosaControlPlane.Spec.Version + channelGroup := string(rosaControlPlane.Spec.ChannelGroup) valid, err := ocmClient.ValidateHypershiftVersion(version, channelGroup) if err != nil { return "", fmt.Errorf("error validating version in this channelGroup : %w", err) @@ -920,16 +1002,42 @@ func validateControlPlaneSpec(ocmClient rosa.OCMClient, rosaScope *scope.ROSACon return fmt.Sprintf("this version %s is not supported in this channelGroup", version), nil } + if rosaControlPlane.Spec.AutoNode != nil { + if rosaControlPlane.Spec.AutoNode.Mode == rosacontrolplanev1.AutoNodeModeEnabled && rosaControlPlane.Spec.AutoNode.RoleARN == "" { + return "", fmt.Errorf("error ROSAControlPlane autoNode.roleARN, must be set when autoNode mode is enabled") + } + } + // TODO: add more input validations return "", nil } -func buildOCMClusterSpec(controlPlaneSpec rosacontrolplanev1.RosaControlPlaneSpec, creator *rosaaws.Creator) (ocm.Spec, error) { +func buildOCMClusterSpec(controlPlaneSpec rosacontrolplanev1.RosaControlPlaneSpec, roleConfig *expinfrav1.ROSARoleConfig, rosaNet *expinfrav1.ROSANetwork, creator *rosaaws.Creator) (ocm.Spec, error) { 
billingAccount := controlPlaneSpec.BillingAccount if billingAccount == "" { billingAccount = creator.AccountID } + var subnetIDs []string + var availabilityZones []string + + if controlPlaneSpec.ROSANetworkRef == nil { + if len(controlPlaneSpec.Subnets) == 0 { + return ocm.Spec{}, fmt.Errorf("RosaControlPlaneSpec.Subnets is empty") + } + if len(controlPlaneSpec.AvailabilityZones) == 0 { + return ocm.Spec{}, fmt.Errorf("RosaControlPlaneSpec.AvailabilityZones is empty") + } + + subnetIDs = controlPlaneSpec.Subnets + availabilityZones = controlPlaneSpec.AvailabilityZones + } else { + for _, v := range rosaNet.Status.Subnets { + subnetIDs = append(subnetIDs, v.PublicSubnet, v.PrivateSubnet) + availabilityZones = append(availabilityZones, v.AvailabilityZone) + } + } + ocmClusterSpec := ocm.Spec{ DryRun: ptr.To(false), Name: controlPlaneSpec.RosaClusterName, @@ -941,18 +1049,18 @@ func buildOCMClusterSpec(controlPlaneSpec rosacontrolplanev1.RosaControlPlaneSpe DisableWorkloadMonitoring: ptr.To(true), DefaultIngress: ocm.NewDefaultIngressSpec(), // n.b. this is a no-op when it's set to the default value ComputeMachineType: controlPlaneSpec.DefaultMachinePoolSpec.InstanceType, - AvailabilityZones: controlPlaneSpec.AvailabilityZones, + AvailabilityZones: availabilityZones, Tags: controlPlaneSpec.AdditionalTags, EtcdEncryption: controlPlaneSpec.EtcdEncryptionKMSARN != "", EtcdEncryptionKMSArn: controlPlaneSpec.EtcdEncryptionKMSARN, - SubnetIds: controlPlaneSpec.Subnets, + SubnetIds: subnetIDs, IsSTS: true, - RoleARN: controlPlaneSpec.InstallerRoleARN, - SupportRoleARN: controlPlaneSpec.SupportRoleARN, - WorkerRoleARN: controlPlaneSpec.WorkerRoleARN, - OperatorIAMRoles: operatorIAMRoles(controlPlaneSpec.RolesRef), - OidcConfigId: controlPlaneSpec.OIDCID, + RoleARN: roleConfig.Status.AccountRolesRef.InstallerRoleARN, + SupportRoleARN: roleConfig.Status.AccountRolesRef.SupportRoleARN, + WorkerRoleARN: roleConfig.Status.AccountRolesRef.WorkerRoleARN, + OperatorIAMRoles: operatorIAMRoles(roleConfig.Status.OperatorRolesRef), + OidcConfigId: roleConfig.Status.OIDCID, Mode: "auto", Hypershift: ocm.Hypershift{ Enabled: true, @@ -1007,8 +1115,8 @@ func buildOCMClusterSpec(controlPlaneSpec rosacontrolplanev1.RosaControlPlaneSpe ocmClusterSpec.Autoscaling = true ocmClusterSpec.MaxReplicas = computeAutoscaling.MaxReplicas ocmClusterSpec.MinReplicas = computeAutoscaling.MinReplicas - } else if len(controlPlaneSpec.AvailabilityZones) > 1 { - ocmClusterSpec.ComputeNodes = len(controlPlaneSpec.AvailabilityZones) + } else if len(ocmClusterSpec.AvailabilityZones) > 1 { + ocmClusterSpec.ComputeNodes = len(ocmClusterSpec.AvailabilityZones) } if controlPlaneSpec.ProvisionShardID != "" { @@ -1039,6 +1147,12 @@ func buildOCMClusterSpec(controlPlaneSpec rosacontrolplanev1.RosaControlPlaneSpe } } + // Set the AutoNode Karpenter config + if controlPlaneSpec.AutoNode != nil { + ocmClusterSpec.AutoNodeMode = strings.ToLower(string(controlPlaneSpec.AutoNode.Mode)) + ocmClusterSpec.AutoNodeRoleARN = controlPlaneSpec.AutoNode.RoleARN + } + return ocmClusterSpec, nil } @@ -1111,8 +1225,8 @@ func (r *ROSAControlPlaneReconciler) rosaClusterToROSAControlPlane(log *logger.L } controlPlaneRef := cluster.Spec.ControlPlaneRef - if controlPlaneRef == nil || controlPlaneRef.Kind != rosaControlPlaneKind { - log.Debug("ControlPlaneRef is nil or not ROSAControlPlane, skipping mapping") + if !controlPlaneRef.IsDefined() || controlPlaneRef.Kind != rosaControlPlaneKind { + log.Debug("ControlPlaneRef is not defined or not ROSAControlPlane, skipping 
mapping") return nil } @@ -1120,14 +1234,14 @@ func (r *ROSAControlPlaneReconciler) rosaClusterToROSAControlPlane(log *logger.L { NamespacedName: types.NamespacedName{ Name: controlPlaneRef.Name, - Namespace: controlPlaneRef.Namespace, + Namespace: cluster.Namespace, }, }, } } } -func buildAPIEndpoint(cluster *cmv1.Cluster) (*clusterv1.APIEndpoint, error) { +func buildAPIEndpoint(cluster *cmv1.Cluster) (*clusterv1beta1.APIEndpoint, error) { parsedURL, err := url.ParseRequestURI(cluster.API().URL()) if err != nil { return nil, err @@ -1142,7 +1256,7 @@ func buildAPIEndpoint(cluster *cmv1.Cluster) (*clusterv1.APIEndpoint, error) { return nil, err } - return &clusterv1.APIEndpoint{ + return &clusterv1beta1.APIEndpoint{ Host: host, Port: int32(port), //#nosec G109 G115 }, nil diff --git a/controlplane/rosa/controllers/rosacontrolplane_controller_test.go b/controlplane/rosa/controllers/rosacontrolplane_controller_test.go index 61b8f9ce52..6e8ba2a320 100644 --- a/controlplane/rosa/controllers/rosacontrolplane_controller_test.go +++ b/controlplane/rosa/controllers/rosacontrolplane_controller_test.go @@ -25,6 +25,7 @@ import ( "net/http" "net/http/httptest" "net/url" + "strings" "testing" "time" @@ -52,8 +53,9 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/rosa" "sigs.k8s.io/cluster-api-provider-aws/v2/test/mocks" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - "sigs.k8s.io/cluster-api/util/conditions" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" + v1beta1conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" "sigs.k8s.io/cluster-api/util/patch" ) @@ -181,7 +183,7 @@ func TestUpdateOCMClusterSpec(t *testing.T) { }) // Test case 6: channel group update - t.Run("Update AllowedRegistriesForImport", func(t *testing.T) { + t.Run("Update channel group", func(t *testing.T) { rosaControlPlane := &rosacontrolplanev1.ROSAControlPlane{ Spec: rosacontrolplanev1.RosaControlPlaneSpec{ ChannelGroup: rosacontrolplanev1.Candidate, @@ -203,6 +205,99 @@ func TestUpdateOCMClusterSpec(t *testing.T) { g.Expect(updated).To(BeTrue()) g.Expect(ocmSpec).To(Equal(expectedOCMSpec)) }) + + // Test case 7: AutoNode update + t.Run("Update Auto Node", func(t *testing.T) { + rosaControlPlane := &rosacontrolplanev1.ROSAControlPlane{ + Spec: rosacontrolplanev1.RosaControlPlaneSpec{ + AutoNode: &rosacontrolplanev1.AutoNode{ + Mode: rosacontrolplanev1.AutoNodeModeEnabled, + RoleARN: "autoNodeARN", + }, + }, + } + + mockCluster, _ := v1.NewCluster(). + AutoNode(v1.NewClusterAutoNode().Mode("disabled")). + AWS(v1.NewAWS().AutoNode(v1.NewAwsAutoNode().RoleArn("anyARN"))). 
+ Build() + + expectedOCMSpec := ocm.Spec{ + AutoNodeMode: "enabled", + AutoNodeRoleARN: "autoNodeARN", + } + + reconciler := &ROSAControlPlaneReconciler{} + ocmSpec, updated := reconciler.updateOCMClusterSpec(rosaControlPlane, mockCluster) + + g.Expect(updated).To(BeTrue()) + g.Expect(ocmSpec).To(Equal(expectedOCMSpec)) + }) +} + +func TestValidateControlPlaneSpec(t *testing.T) { + g := NewWithT(t) + + mockCtrl := gomock.NewController(t) + ocmMock := mocks.NewMockOCMClient(mockCtrl) + expect := func(m *mocks.MockOCMClientMockRecorder) { + m.ValidateHypershiftVersion(gomock.Any(), gomock.Any()).DoAndReturn(func(versionRawID string, channelGroup string) (bool, error) { + return true, nil + }).AnyTimes() + } + expect(ocmMock.EXPECT()) + + // Test case 1: AutoNode and Version are set and valid + t.Run("AutoNode is valid.", func(t *testing.T) { + rosaControlPlane := &rosacontrolplanev1.ROSAControlPlane{ + Spec: rosacontrolplanev1.RosaControlPlaneSpec{ + AutoNode: &rosacontrolplanev1.AutoNode{ + Mode: rosacontrolplanev1.AutoNodeModeEnabled, + RoleARN: "autoNodeARN", + }, + Version: "4.19.0", + ChannelGroup: rosacontrolplanev1.Stable, + }, + } + str, err := validateControlPlaneSpec(ocmMock, rosaControlPlane) + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(str).To(Equal("")) + }) + + // Test case 2: AutoNode is enabled and AutoNode ARN is empty. + t.Run("AutoNode is enabled and AutoNode ARN is empty", func(t *testing.T) { + rosaControlPlane := &rosacontrolplanev1.ROSAControlPlane{ + Spec: rosacontrolplanev1.RosaControlPlaneSpec{ + AutoNode: &rosacontrolplanev1.AutoNode{ + Mode: rosacontrolplanev1.AutoNodeModeEnabled, + RoleARN: "", + }, + Version: "4.19.0", + ChannelGroup: rosacontrolplanev1.Stable, + }, + } + str, err := validateControlPlaneSpec(ocmMock, rosaControlPlane) + g.Expect(err).To(HaveOccurred()) + g.Expect(strings.Contains(err.Error(), "autoNode.roleARN must be set when autoNode mode is enabled")).To(BeTrue()) + g.Expect(str).To(Equal("")) + }) + + // Test case 3: AutoNode is disabled and AutoNode ARN is empty. 
+ t.Run("AutoNode is disabled and AutoNode ARN is empty.", func(t *testing.T) { + rosaControlPlane := &rosacontrolplanev1.ROSAControlPlane{ + Spec: rosacontrolplanev1.RosaControlPlaneSpec{ + AutoNode: &rosacontrolplanev1.AutoNode{ + Mode: rosacontrolplanev1.AutoNodeModeDisabled, + RoleARN: "", + }, + Version: "4.19.0", + ChannelGroup: rosacontrolplanev1.Stable, + }, + } + str, err := validateControlPlaneSpec(ocmMock, rosaControlPlane) + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(str).To(Equal("")) + }) } func TestRosaControlPlaneReconcileStatusVersion(t *testing.T) { @@ -236,7 +331,8 @@ func TestRosaControlPlaneReconcileStatusVersion(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Name: "rosa-control-plane-1", Namespace: ns.Name, - UID: types.UID("rosa-control-plane-1")}, + UID: types.UID("rosa-control-plane-1"), + }, TypeMeta: metav1.TypeMeta{ Kind: "ROSAControlPlane", APIVersion: rosacontrolplanev1.GroupVersion.String(), @@ -250,10 +346,19 @@ func TestRosaControlPlaneReconcileStatusVersion(t *testing.T) { PodCIDR: "10.128.0.0/14", ServiceCIDR: "172.30.0.0/16", }, - Region: "us-east-1", - Version: "4.15.20", - ChannelGroup: "stable", - RolesRef: rosacontrolplanev1.AWSRolesRef{}, + Region: "us-east-1", + Version: "4.15.20", + ChannelGroup: "stable", + RolesRef: rosacontrolplanev1.AWSRolesRef{ + IngressARN: "op-arn1", + ImageRegistryARN: "op-arn2", + StorageARN: "op-arn3", + NetworkARN: "op-arn4", + KubeCloudControllerARN: "op-arn5", + NodePoolManagementARN: "op-arn6", + ControlPlaneOperatorARN: "op-arn7", + KMSProviderARN: "op-arn8", + }, OIDCID: "iodcid1", InstallerRoleARN: "arn1", WorkerRoleARN: "arn2", @@ -269,12 +374,13 @@ func TestRosaControlPlaneReconcileStatusVersion(t *testing.T) { }, Status: rosacontrolplanev1.RosaControlPlaneStatus{ ID: "rosa-control-plane-1", - Conditions: clusterv1.Conditions{clusterv1.Condition{ - Type: "Paused", - Status: "False", - Severity: "", - Reason: "NotPaused", - Message: "", + Conditions: clusterv1beta1.Conditions{clusterv1beta1.Condition{ + Type: "Paused", + Status: "False", + Severity: "", + Reason: "NotPaused", + Message: "", + LastTransitionTime: metav1.NewTime(time.Now()), }}, }, } @@ -286,10 +392,10 @@ func TestRosaControlPlaneReconcileStatusVersion(t *testing.T) { UID: types.UID("owner-cluster-1"), }, Spec: clusterv1.ClusterSpec{ - ControlPlaneRef: &corev1.ObjectReference{ - Name: rosaControlPlane.Name, - Kind: "ROSAControlPlane", - APIVersion: rosacontrolplanev1.GroupVersion.String(), + ControlPlaneRef: clusterv1.ContractVersionedObjectReference{ + Name: rosaControlPlane.Name, + Kind: "ROSAControlPlane", + APIGroup: rosacontrolplanev1.GroupVersion.Group, }, }, } @@ -395,12 +501,13 @@ func TestRosaControlPlaneReconcileStatusVersion(t *testing.T) { g.Expect(err).ShouldNot(HaveOccurred()) rosaControlPlane.Status = rosacontrolplanev1.RosaControlPlaneStatus{ ID: "rosa-control-plane-1", - Conditions: clusterv1.Conditions{clusterv1.Condition{ - Type: "Paused", - Status: "False", - Severity: "", - Reason: "NotPaused", - Message: "", + Conditions: clusterv1beta1.Conditions{clusterv1beta1.Condition{ + Type: "Paused", + Status: "False", + Severity: "", + Reason: "NotPaused", + Message: "", + LastTransitionTime: metav1.NewTime(time.Now()), }}, } @@ -412,7 +519,7 @@ func TestRosaControlPlaneReconcileStatusVersion(t *testing.T) { key := client.ObjectKey{Name: rosaControlPlane.Name, Namespace: rosaControlPlane.Namespace} errGet := testEnv.Get(ctx, key, cp) g.Expect(errGet).NotTo(HaveOccurred()) - oldCondition := conditions.Get(cp, 
clusterv1.PausedV1Beta2Condition) + oldCondition := v1beta1conditions.Get(cp, clusterv1beta1.PausedV1Beta2Condition) g.Expect(oldCondition).NotTo(BeNil()) r := ROSAControlPlaneReconciler{ diff --git a/controlplane/rosa/controllers/suite_test.go b/controlplane/rosa/controllers/suite_test.go index ebdfce2a76..d0b5c1c9e5 100644 --- a/controlplane/rosa/controllers/suite_test.go +++ b/controlplane/rosa/controllers/suite_test.go @@ -32,7 +32,7 @@ import ( rosacontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/rosa/api/v1beta2" expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/test/helpers" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" ) var ( diff --git a/devbox.lock b/devbox.lock index e537ec3f69..81e4b7c8d3 100644 --- a/devbox.lock +++ b/devbox.lock @@ -233,57 +233,54 @@ } } }, - "github:NixOS/nixpkgs/nixpkgs-unstable": { - "resolved": "github:NixOS/nixpkgs/3549532663732bfd89993204d40543e9edaec4f2?lastModified=1742272065&narHash=sha256-ud8vcSzJsZ%2FCK%2Br8%2Fv0lyf4yUntVmDq6Z0A41ODfWbE%3D" - }, "github:NixOS/nixpkgs/nixpkgs-unstable": { "resolved": "github:NixOS/nixpkgs/573c650e8a14b2faa0041645ab18aed7e60f0c9a?lastModified=1741865919&narHash=sha256-4thdbnP6dlbdq%2BqZWTsm4ffAwoS8Tiq1YResB%2BRP6WE%3D" }, - "go@1.22": { - "last_modified": "2024-12-23T21:10:33Z", - "resolved": "github:NixOS/nixpkgs/de1864217bfa9b5845f465e771e0ecb48b30e02d#go_1_22", + "go@latest": { + "last_modified": "2025-07-28T17:09:23Z", + "resolved": "github:NixOS/nixpkgs/648f70160c03151bc2121d179291337ad6bc564b#go", "source": "devbox-search", - "version": "1.22.10", + "version": "1.24.5", "systems": { "aarch64-darwin": { "outputs": [ { "name": "out", - "path": "/nix/store/34qa7mwbc1ja7758q4d9sjwmgip72lj9-go-1.22.10", + "path": "/nix/store/kw1vd98s15vj700m3gx2x2xca2z477i3-go-1.24.5", "default": true } ], - "store_path": "/nix/store/34qa7mwbc1ja7758q4d9sjwmgip72lj9-go-1.22.10" + "store_path": "/nix/store/kw1vd98s15vj700m3gx2x2xca2z477i3-go-1.24.5" }, "aarch64-linux": { "outputs": [ { "name": "out", - "path": "/nix/store/gbidq6smzj09j6qmcdklrvrjgllxmr5j-go-1.22.10", + "path": "/nix/store/5bzlaj0c4mqw9p0zrcx5g9vz16vd45dl-go-1.24.5", "default": true } ], - "store_path": "/nix/store/gbidq6smzj09j6qmcdklrvrjgllxmr5j-go-1.22.10" + "store_path": "/nix/store/5bzlaj0c4mqw9p0zrcx5g9vz16vd45dl-go-1.24.5" }, "x86_64-darwin": { "outputs": [ { "name": "out", - "path": "/nix/store/jgz3hrbqblw65v95npdnvlymlm991s0c-go-1.22.10", + "path": "/nix/store/b72n20ixzl5ja9vciwahkr30bhmsn5jc-go-1.24.5", "default": true } ], - "store_path": "/nix/store/jgz3hrbqblw65v95npdnvlymlm991s0c-go-1.22.10" + "store_path": "/nix/store/b72n20ixzl5ja9vciwahkr30bhmsn5jc-go-1.24.5" }, "x86_64-linux": { "outputs": [ { "name": "out", - "path": "/nix/store/1hd6kq7rssk14py6v8mrdq2pn5ciiw6y-go-1.22.10", + "path": "/nix/store/y4awwzp30ka130wmjrpaqjmjdf9p010w-go-1.24.5", "default": true } ], - "store_path": "/nix/store/1hd6kq7rssk14py6v8mrdq2pn5ciiw6y-go-1.22.10" + "store_path": "/nix/store/y4awwzp30ka130wmjrpaqjmjdf9p010w-go-1.24.5" } } }, @@ -499,51 +496,51 @@ } } }, - "kustomize@latest": { - "last_modified": "2025-03-11T17:52:14Z", - "resolved": "github:NixOS/nixpkgs/0d534853a55b5d02a4ababa1d71921ce8f0aee4c#kustomize", + "kustomize@5.5.0": { + "last_modified": "2024-12-23T21:10:33Z", + "resolved": "github:NixOS/nixpkgs/de1864217bfa9b5845f465e771e0ecb48b30e02d#kustomize", "source": "devbox-search", - "version": "5.6.0", + "version": "5.5.0", 
"systems": { "aarch64-darwin": { "outputs": [ { "name": "out", - "path": "/nix/store/y85spf2nmlffzmq9lyzl8db7i0acdsqf-kustomize-5.6.0", + "path": "/nix/store/xpypw514kxv803li525a5by78g90ygyl-kustomize-5.5.0", "default": true } ], - "store_path": "/nix/store/y85spf2nmlffzmq9lyzl8db7i0acdsqf-kustomize-5.6.0" + "store_path": "/nix/store/xpypw514kxv803li525a5by78g90ygyl-kustomize-5.5.0" }, "aarch64-linux": { "outputs": [ { "name": "out", - "path": "/nix/store/pwnmchq4bafvkbb84m62m8vqp9dqgaz8-kustomize-5.6.0", + "path": "/nix/store/4v454dywxsyzcq9ypm7aa5v0mdlj9vqn-kustomize-5.5.0", "default": true } ], - "store_path": "/nix/store/pwnmchq4bafvkbb84m62m8vqp9dqgaz8-kustomize-5.6.0" + "store_path": "/nix/store/4v454dywxsyzcq9ypm7aa5v0mdlj9vqn-kustomize-5.5.0" }, "x86_64-darwin": { "outputs": [ { "name": "out", - "path": "/nix/store/hrlykqw1jcl2ykzida4amf2s5sjhdsng-kustomize-5.6.0", + "path": "/nix/store/6g9p4i5r4qvfdygh157376fhzpp55bqk-kustomize-5.5.0", "default": true } ], - "store_path": "/nix/store/hrlykqw1jcl2ykzida4amf2s5sjhdsng-kustomize-5.6.0" + "store_path": "/nix/store/6g9p4i5r4qvfdygh157376fhzpp55bqk-kustomize-5.5.0" }, "x86_64-linux": { "outputs": [ { "name": "out", - "path": "/nix/store/cwclm6315x1cn2kswzfhfcqp13qf44b0-kustomize-5.6.0", + "path": "/nix/store/5z7yq6831fsrsxsvg2ic229146ymmjaq-kustomize-5.5.0", "default": true } ], - "store_path": "/nix/store/cwclm6315x1cn2kswzfhfcqp13qf44b0-kustomize-5.6.0" + "store_path": "/nix/store/5z7yq6831fsrsxsvg2ic229146ymmjaq-kustomize-5.5.0" } } }, diff --git a/docs/book/src/development/releasing.md b/docs/book/src/development/releasing.md index 4020e84249..fad2ebd894 100644 --- a/docs/book/src/development/releasing.md +++ b/docs/book/src/development/releasing.md @@ -24,6 +24,7 @@ This includes the nightly image push jobs, which can be found at https://testgri 1. Make sure you have push permissions to the upstream CAPA repo. Push tag you've just created (`git push $VERSION`). Pushing this tag will kick off a GitHub Action that will create the release and attach the binaries and YAML templates to it. 1. A prow job will start running to push images to the staging repo, can be seen [here](https://testgrid.k8s.io/sig-cluster-lifecycle-image-pushes#post-cluster-api-provider-aws-push-images). The job is called "post-cluster-api-provider-aws-push-images," and is defined in . If this job fails due to Go versions being out of date, you may need to update the Google Cloud Builder (GCB) image used in [`cloudbuild.yaml`](https://github.com/kubernetes-sigs/cluster-api-provider-aws/blob/main/cloudbuild.yaml) and [`cloudbuild-nightly.yaml`](https://github.com/kubernetes-sigs/cluster-api-provider-aws/blob/main/cloudbuild-nightly.yaml). 1. When the job is finished, wait for the images to be created: `docker pull gcr.io/k8s-staging-cluster-api-aws/cluster-api-aws-controller:$VERSION`. You can also wrap this with a command to retry periodically, until the job is complete, e.g. `watch --interval 30 --chgexit docker pull <...>`. +1. Also pushing the tag will trigger a [GitHub Action](https://github.com/kubernetes-sigs/cluster-api-provider-aws/actions/workflows/release.yaml) to create a draft release. ## Promote container images from staging to production @@ -55,8 +56,10 @@ Promote the container images from the staging registry to the production registr ``` -## Verify and Publish the draft release - +## Verifing and Publish the release +1. 
A draft release should now be available on the GitHub Releases page for the CAPA repository: https://github.com/kubernetes-sigs/cluster-api-provider-aws/releases. Go there and verify that all the relevant information is present before undrafting the release. +1. Make sure the release notes are present in the release description. +1. Update the release description to link to the promotion image. 1. Verify that all the files below are attached to the drafted release: 1. `clusterawsadm-darwin-amd64` 1. `clusterawsadm-darwin-arm64` @@ -82,7 +85,6 @@ Promote the container images from the staging registry to the production registr 1. `cluster-template-rosa.yaml` 1. `cluster-template-simple-clusterclass.yaml` 1. `metadata.yaml` -1. Update the release description to link to the promotion image. 1. Publish release. Use the pre-release option for release candidate versions of Cluster API Provider AWS. 1. Email `kubernetes-sig-cluster-lifecycle@googlegroups.com` to announce the release. You can use this template for the email: @@ -107,3 +109,8 @@ If the release is for a new MAJOR.MINOR version (i.e. not a patch release) then This is done by updating the [test-infra](https://github.com/kubernetes/test-infra) repo. For an example PR, see [this](https://github.com/kubernetes/test-infra/pull/33751) for the v2.7 release series. Consider removing jobs from an old release as well. We should only keep jobs for 2 release branches. + +### Prepare main for the next release + +1. Adjust `metadata.yaml` by adding the next release (for example, if v2.7 has just been released, add v2.8 here). +1. Adjust `test/e2e/data/shared/v1beta2_provider/metadata.yaml` by adding the now-released MAJOR.MINOR release (the new release will still be referred to as `9.9`). diff --git a/docs/book/src/topics/eks/creating-a-cluster.md b/docs/book/src/topics/eks/creating-a-cluster.md index 0ef75009c6..7ec523837f 100644 --- a/docs/book/src/topics/eks/creating-a-cluster.md +++ b/docs/book/src/topics/eks/creating-a-cluster.md @@ -14,6 +14,9 @@ clusterctl generate cluster capi-eks-quickstart --flavor eks-managedmachinepool NOTE: When creating an EKS cluster only the **MAJOR.MINOR** of the `-kubernetes-version` is taken into consideration. +By default, CAPA relies on the default EKS cluster upgrade policy, which at the time of writing is EXTENDED support. +See the [cluster upgrade policy documentation](https://docs.aws.amazon.com/eks/latest/userguide/view-upgrade-policy.html) for more information. + ## Kubeconfig When creating an EKS cluster 2 kubeconfigs are generated and stored as secrets in the management cluster. This is different to when you create a non-managed cluster using the AWS provider. diff --git a/docs/book/src/topics/rosa/creating-a-cluster.md b/docs/book/src/topics/rosa/creating-a-cluster.md index 0d58b6f5bb..f196afddb4 100644 --- a/docs/book/src/topics/rosa/creating-a-cluster.md +++ b/docs/book/src/topics/rosa/creating-a-cluster.md @@ -89,30 +89,46 @@ The SSO offline token is being deprecated and it is recommended to use service a Follow the guide [here](https://docs.aws.amazon.com/ROSA/latest/userguide/getting-started-hcp.html) up until ["Create a ROSA with HCP Cluster"](https://docs.aws.amazon.com/ROSA/latest/userguide/getting-started-hcp.html#create-hcp-cluster-cli) to install the required tools and set up the prerequisite infrastructure. Once Step 3 is done, you will be ready to proceed with creating a ROSA HCP cluster using cluster-api.
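Before moving on to the cluster manifests, it can help to sanity-check the `rosa` CLI session itself. A minimal sketch, assuming a recent `rosa` CLI that accepts service-account credentials via `--client-id`/`--client-secret` (the flag names and placeholder values here are illustrative; consult `rosa login --help` for your version):

```shell
# Log in with a Red Hat service account rather than the deprecated SSO offline token.
# The client ID/secret come from a service account created on console.redhat.com.
rosa login --client-id "<CLIENT_ID>" --client-secret "<CLIENT_SECRET>"

# Confirm the authenticated identity before creating any resources.
rosa whoami
```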
+Note: Skip the "Create the required IAM roles and OpenID Connect configuration" step from the prerequisites URL above and use templates/cluster-template-rosa-role-config.yaml to generate a ROSARoleConfig CR that creates the required account roles, operator roles, and managed OIDC provider. + ## Creating the cluster 1. Prepare the environment: ```bash - export OPENSHIFT_VERSION="4.14.5" + export OPENSHIFT_VERSION="4.19.0" export AWS_REGION="us-west-2" export AWS_AVAILABILITY_ZONE="us-west-2a" export AWS_ACCOUNT_ID="" export AWS_CREATOR_ARN="" # can be retrieved e.g. using `aws sts get-caller-identity` + # Note: if using templates/cluster-template-rosa.yaml, set the env variables below export OIDC_CONFIG_ID="" # OIDC config id created previously with `rosa create oidc-config` export ACCOUNT_ROLES_PREFIX="ManagedOpenShift-HCP" # prefix used to create account IAM roles with `rosa create account-roles` export OPERATOR_ROLES_PREFIX="capi-rosa-quickstart" # prefix used to create operator roles with `rosa create operator-roles --prefix ` + # Note: if using templates/cluster-template-rosa-role-config.yaml, set the env variables below + export ACCOUNT_ROLES_PREFIX="capa" # the prefix can be changed to any preferred prefix with max 4 chars + export OPERATOR_ROLES_PREFIX="capa" # the prefix can be changed to any preferred prefix with max 4 chars + # subnet IDs created earlier export PUBLIC_SUBNET_ID="subnet-0b54a1111111111111" export PRIVATE_SUBNET_ID="subnet-05e72222222222222" ``` 1. Render the cluster manifest using the ROSA HCP cluster template: + + a. Using templates/cluster-template-rosa.yaml + + Note: The AWS role name must be no more than 64 characters in length. Otherwise an error will be returned. Truncate values exceeding 64 characters. ```shell clusterctl generate cluster --from templates/cluster-template-rosa.yaml > rosa-capi-cluster.yaml ``` - Note: The AWS role name must be no more than 64 characters in length. Otherwise an error will be returned. Truncate values exceeding 64 characters. + + b. Using templates/cluster-template-rosa-role-config.yaml + ```shell + clusterctl generate cluster --from templates/cluster-template-rosa-role-config.yaml > rosa-capi-cluster.yaml + ``` + 1. If a credentials secret was created earlier, edit `ROSAControlPlane` to reference it: ```yaml @@ -151,7 +167,7 @@ Follow the guide [here](https://docs.aws.amazon.com/ROSA/latest/userguide/gettin see [Multi-tenancy](../multitenancy.md) for more details -1. Finally apply the manifest to create your Rosa cluster: +1. Finally, apply the manifest to create your ROSA cluster: ```shell kubectl apply -f rosa-capi-cluster.yaml ``` diff --git a/docs/book/src/topics/rosa/index.md b/docs/book/src/topics/rosa/index.md index fc6df78113..0e2ec9394b 100644 --- a/docs/book/src/topics/rosa/index.md +++ b/docs/book/src/topics/rosa/index.md @@ -7,7 +7,7 @@ The AWS provider supports creating Red Hat OpenShift Service on AWS ([ROSA](https://www.redhat.com/en/technologies/cloud-computing/openshift/aws)) based clusters.
Currently the following features are supported: -- Provisioning/Deleting a ROSA cluster with hosted control planes ([HCP](https://docs.openshift.com/rosa/rosa_hcp/rosa-hcp-sts-creating-a-cluster-quickly.html)) +- Provisioning/Deleting a ROSA cluster with hosted control planes ([HCP](https://docs.redhat.com/en/documentation/red_hat_openshift_service_on_aws/4)) The implementation introduces the following CRD kinds: @@ -23,4 +23,4 @@ A new template is available in the templates folder for creating a managed ROSA * [Creating MachinePools](creating-rosa-machinepools.md) * [Upgrades](upgrades.md) * [External Auth Providers](external-auth.md) -* [Support](support.md) \ No newline at end of file +* [Support](support.md) diff --git a/docs/proposal/20250922-nodeadm-bootstrap.md b/docs/proposal/20250922-nodeadm-bootstrap.md new file mode 100644 index 0000000000..734c62a973 --- /dev/null +++ b/docs/proposal/20250922-nodeadm-bootstrap.md @@ -0,0 +1,302 @@ +--- +title: "Proposal: EKS Support in CAPA for nodeadm" +authors: + - "@faiq" +reviewers: +creation-date: 2025-09-22 +last-updated: 2025-09-22 +status: proposed +see-also: +- https://github.com/kubernetes-sigs/cluster-api-provider-aws/pull/3518 +replaces: [] +superseded-by: [] +--- + + +## Table of Contents +- [Summary](#summary) +- [Motivation](#motivation) + - [Goals](#goals) + - [Non-Goals](#non-goals) +- [Proposal](#proposal) + - [User Stories](#user-stories) +- [Alternatives](#alternatives) + + + ## Summary + + Currently, EKS support in the Cluster API Provider for AWS (CAPA) is broken for Amazon Linux 2023 (AL2023) because the `bootstrap.sh` script is no longer supported. This proposal introduces a new Custom Resource Definition (CRD), `NodeadmConfig`, to handle the new `nodeadm` bootstrapping method required by AL2023. This approach is favored over modifying the existing `EKSConfig` type to maintain a cleaner API, avoid fields that are not cross-compatible between bootstrapping methods, and simplify the future deprecation of the `bootstrap.sh` implementation. + + ----- + + ## Motivation + + Currently, EKS support in CAPA is broken for AL2023 (Amazon Linux 2023) because the bootstrapping method previously used to provision EKS nodes, `bootstrap.sh`, is no longer supported. Users running AL2023 see errors like this on the worker nodes: + + ```bash + [root@localhost bin]# /etc/eks/bootstrap.sh default_dk-eks-133-control-plane + + + \!\!\!\!\!\!\!\!\!\! + \!\!\!\!\!\!\!\!\!\! ERROR: bootstrap.sh has been removed from AL2023-based EKS AMIs. + \!\!\!\!\!\!\!\!\!\! + \!\!\!\!\!\!\!\!\!\! EKS nodes are now initialized by nodeadm. + \!\!\!\!\!\!\!\!\!\! + \!\!\!\!\!\!\!\!\!\! To migrate your user data, see: + \!\!\!\!\!\!\!\!\!\! + \!\!\!\!\!\!\!\!\!\! https://awslabs.github.io/amazon-eks-ami/nodeadm/ + \!\!\!\!\!\!\!\!\!\! + + ```` + + In CAPA, our implementation of the EKS bootstrapping method is currently tied to the `bootstrap.sh` script and is implemented by the `EKSConfig` type. + + Additionally, the EKS team is not publishing any more Amazon Linux 2 (AL2) AMIs after November 26th, 2025, and Kubernetes version 1.32 is the last version for which AL2 AMIs will be released. This makes the transition to a new bootstrapping method for AL2023 urgent. + + ### Goals + + * Restore the ability to provision EKS nodes using CAPA with AL2023 AMIs. + * Introduce a new, clean API (`NodeadmConfig`) specifically for the `nodeadm` bootstrap method.
+ * Provide a clear upgrade path for users moving from `EKSConfig` (`bootstrap.sh`) to `NodeadmConfig` (`nodeadm`). + * Make future deprecation of the `bootstrap.sh` implementation in `EKSConfig` easier. + + ### Non-Goals + * Create a metatype that can handle both bootstrap.sh and nodeadm. + * Handle operating systems with different bootstrapping mechanisms, such as Bottlerocket. + ----- + + ## Proposal + +This KEP proposes a new type that handles bootstrapping with `nodeadm` alone. This new type, `NodeadmConfig`, will wrap the API implementation for the Nodeadm option as a bootstrap provider. + + This approach is proposed due to drawbacks with the alternative of modifying the existing `EKSConfig` type, which would involve the introduction of new fields to distinguish between bootstrap methods and lead to a confusing API where some fields are only valid for one method. + + Examples of fields in the existing API that are no longer valid with `nodeadm`: + + * `ContainerRuntime` + * `DNSClusterIP` + * `DockerConfigJSON` + * `APIRetryAttempts` + * `PostBootstrapCommands` + * `BootstrapCommandOverride` + + The **pros** of this approach are: + + * A cleaner API that's more descriptive for each bootstrap method. + * A new implementation will make deprecating EKSConfig's `bootstrap.sh` implementation easier. + + The **cons** are: + + * The scope of work to support EKS nodes grows significantly, and delivery is pushed out. + + ### User Stories + + * As a cluster admin, I need to provision nodes for my EKS cluster using Kubernetes 1.33 or higher. + * As a cluster admin, I need to provision EKS worker nodes using the latest AL2023 AMIs. + * As a cluster admin, I need to upgrade my existing EKS cluster nodes from an AL2-based version (e.g., 1.32) to an AL2023-based version (e.g., 1.33) with minimal disruption. + + ### API Design + + On a high level, this new type `NodeadmConfig` wraps the API implementation for the Nodeadm option as a bootstrap provider. + + ```go + // NodeadmConfigSpec defines the desired state of NodeadmConfig. + type NodeadmConfigSpec struct { + // Kubelet contains options for kubelet. + // +optional + Kubelet *KubeletOptions `json:"kubelet,omitempty"` + + // Containerd contains options for containerd. + // +optional + Containerd *ContainerdOptions `json:"containerd,omitempty"` + + // FeatureGates holds key-value pairs to enable or disable application features. + // +optional + FeatureGates map[Feature]bool `json:"featureGates,omitempty"` + + // PreBootstrapCommands specifies extra commands to run before bootstrapping nodes. + // +optional + PreBootstrapCommands []string `json:"preBootstrapCommands,omitempty"` + + // Files specifies extra files to be passed to user_data upon creation. + // +optional + Files []File `json:"files,omitempty"` + + // Users specifies extra users to add. + // +optional + Users []User `json:"users,omitempty"` + + // NTP specifies NTP configuration. + // +optional + NTP *NTP `json:"ntp,omitempty"` + + // DiskSetup specifies options for the creation of partition tables and file systems on devices. + // +optional + DiskSetup *DiskSetup `json:"diskSetup,omitempty"` + + // Mounts specifies a list of mount points to be setup. + // +optional + Mounts []MountPoints `json:"mounts,omitempty"` + } + ``` + + ----- + + ## Design Details + + ### Upgrade Strategy + +A valid concern that CAPA users will have is upgrading existing clusters to machines that use the new `NodeadmConfig` bootstrap CRD. This KEP does not change the process.
As before, the user will reference a new BootstrapConfigTemplate. However, the kind will change from EKSConfigTemplate to NodeadmConfigTemplate. + + #### MachineDeployment Upgrade Example + + A user with a `MachineDeployment` using `EKSConfig` for Kubernetes v1.32 would upgrade to v1.33 by creating a new `NodeadmConfigTemplate` and updating the `MachineDeployment` to reference it and the new Kubernetes version. New machines are rolled out according to the `MachineDeployment` update strategy. + + **Before (v1.32 with `EKSConfigTemplate`):** + + ```yaml + apiVersion: bootstrap.cluster.x-k8s.io/v1beta2 + kind: EKSConfigTemplate + metadata: + name: default132 + spec: + template: + spec: + postBootstrapCommands: + - "echo \"bye world\"" + --- + apiVersion: cluster.x-k8s.io/v1beta1 + kind: MachineDeployment + metadata: + name: default + spec: + clusterName: default + template: + spec: + bootstrap: + configRef: + apiVersion: bootstrap.cluster.x-k8s.io/v1beta2 + kind: EKSConfigTemplate + name: default132 + infrastructureRef: + kind: AWSMachineTemplate + name: default132 + version: v1.32.0 + ```` + + **After (v1.33 with `NodeadmConfigTemplate`):** + + ```yaml + apiVersion: bootstrap.cluster.x-k8s.io/v1beta2 + kind: NodeadmConfigTemplate + metadata: + name: default + spec: + template: + spec: + preBootstrapCommands: + - "echo \"hello world\"" + --- + apiVersion: cluster.x-k8s.io/v1beta1 + kind: MachineDeployment + metadata: + name: default + spec: + clusterName: default + template: + spec: + bootstrap: + configRef: + apiVersion: bootstrap.cluster.x-k8s.io/v1beta2 + kind: NodeadmConfigTemplate + name: default + infrastructureRef: + kind: AWSMachineTemplate + name: default + version: v1.33.0 + ``` + + #### MachinePool Upgrade Example + + The flow would be very similar for `MachinePools`. A user would update the `MachinePool` resource to reference a new `NodeadmConfigTemplate` and the target Kubernetes version. + + **Before (v1.32 with `EKSConfigTemplate`):** + + ```yaml + apiVersion: bootstrap.cluster.x-k8s.io/v1beta2 + kind: EKSConfigTemplate + metadata: + name: default-132 + spec: + template: + spec: {} + --- + apiVersion: cluster.x-k8s.io/v1beta1 + kind: MachinePool + metadata: + name: default + spec: + clusterName: default + template: + spec: + bootstrap: + configRef: + apiVersion: bootstrap.cluster.x-k8s.io/v1beta2 + kind: EKSConfigTemplate + name: default-132 + infrastructureRef: + kind: AWSMachinePool + name: default + version: v1.32.0 + ``` + + **After (v1.33 with `NodeadmConfigTemplate`):** + + ```yaml + apiVersion: bootstrap.cluster.x-k8s.io/v1beta2 + kind: NodeadmConfigTemplate + metadata: + name: default-133 + spec: + template: + spec: + preBootstrapCommands: + - "echo \"hello from v1.33.0\"" + --- + apiVersion: cluster.x-k8s.io/v1beta1 + kind: MachinePool + metadata: + name: default + spec: + clusterName: default + template: + spec: + bootstrap: + configRef: + apiVersion: bootstrap.cluster.x-k8s.io/v1beta2 + kind: NodeadmConfigTemplate + name: default-133 + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 + kind: AWSMachinePool + name: default + version: v1.33.0 + ``` + + ### Test Plan + + * Unit tests for the new code. + * Integration tests for the new Nodeadm controller. + * E2E tests exercising the migration from EKSConfig to NodeadmConfig. + + + ----- + + ## Alternatives + + The primary alternative considered was to modify the existing `EKSConfig` type to support `nodeadm`. Currently, there's work being done upstream to address this gap.
On a high level, [this PR](https://github.com/kubernetes-sigs/cluster-api-provider-aws/pull/5553) is adding a new bootstrapping implementation to the existing `EKSConfig` type with some additional API fields to distinguish between bootstrap methods. + + However, there are some drawbacks with this implementation regarding the API design: + + * **Introduction of new fields to distinguish between bootstrap methods**: This complicates the API. + * **Fields that are valid for `bootstrap.sh` are not valid for `nodeadm` and vice versa**: This would lead to a confusing user experience where users could set fields that have no effect for their chosen bootstrap method. diff --git a/docs/triage-party/Dockerfile b/docs/triage-party/Dockerfile index 27cc7cf8d2..b5b230213f 100644 --- a/docs/triage-party/Dockerfile +++ b/docs/triage-party/Dockerfile @@ -28,7 +28,6 @@ FROM gcr.io/distroless/base:latest WORKDIR /app COPY --chown=nobody:nobody --from=builder /go/bin/server /app/triage-party COPY --chown=nobody:nobody --from=builder /git/triage-party/site /app/site -COPY --chown=nobody:nobody --from=builder /git/triage-party/third_party /app/third_party COPY --chown=nobody:nobody config/config.yaml /app/config/config.yaml COPY --chown=nobody:nobody --from=builder /.cache /app/.cache ENV HOME /app diff --git a/examples/machine-with-dynamic-dedicated-host.yaml b/examples/machine-with-dynamic-dedicated-host.yaml new file mode 100644 index 0000000000..46dcf1b069 --- /dev/null +++ b/examples/machine-with-dynamic-dedicated-host.yaml @@ -0,0 +1,47 @@ +apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 +kind: AWSMachine +metadata: + name: test-machine-with-dynamic-host + namespace: default +spec: + instanceType: m5.large + ami: + id: ami-0abcdef1234567890 + iamInstanceProfile: nodes.cluster-api-provider-aws.sigs.k8s.io + + # Additional tags that will be applied to the dedicated host + # These tags can be overridden by dedicated host specific tags + additionalTags: + Environment: "test" + Owner: "platform-team" + CostCenter: "engineering" + + # Dynamic dedicated host allocation configuration + # This will allocate a single dedicated host automatically + dynamicHostAllocation: + # Tags to apply to the allocated dedicated host (optional) + # These tags take precedence over additionalTags above + tags: + Environment: "production" # This will override the "test" value from additionalTags + Application: "virtualization" + Purpose: "BYOL-Windows" + + # Standard instance configuration + subnet: + id: subnet-0a1b2c3d4e5f6g7h8 + + securityGroupOverrides: + - id: sg-0123456789abcdef0 + + userData: + name: test-userdata + namespace: default +--- +apiVersion: v1 +kind: Secret +metadata: + name: test-userdata + namespace: default +type: Opaque +data: + userData: IyEvYmluL2Jhc2gKZWNobyAiSGVsbG8gV29ybGQi # base64 encoded "#!/bin/bash\necho \"Hello World\"" \ No newline at end of file diff --git a/exp/api/v1beta1/awsfargateprofile_types.go b/exp/api/v1beta1/awsfargateprofile_types.go index 155ab4915a..3bdcc0f5eb 100644 --- a/exp/api/v1beta1/awsfargateprofile_types.go +++ b/exp/api/v1beta1/awsfargateprofile_types.go @@ -23,15 +23,13 @@ import ( infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" iamv1 "sigs.k8s.io/cluster-api-provider-aws/v2/iam/api/v1beta1" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) -var ( - // DefaultEKSFargateRole is the name of the default IAM role to use for fargate - // profiles if no other role is supplied in the spec and if iam 
role creation - is not enabled. The default can be created using clusterawsadm or created manually. - DefaultEKSFargateRole = fmt.Sprintf("eks-fargate%s", iamv1.DefaultNameSuffix) -) +// DefaultEKSFargateRole is the name of the default IAM role to use for fargate +// profiles if no other role is supplied in the spec and if iam role creation +// is not enabled. The default can be created using clusterawsadm or created manually. +var DefaultEKSFargateRole = fmt.Sprintf("eks-fargate%s", iamv1.DefaultNameSuffix) // FargateProfileSpec defines the desired state of FargateProfile. type FargateProfileSpec struct { @@ -118,7 +116,7 @@ type FargateProfileStatus struct { // Conditions defines current state of the Fargate profile. // +optional - Conditions clusterv1.Conditions `json:"conditions,omitempty"` + Conditions clusterv1beta1.Conditions `json:"conditions,omitempty"` } // +kubebuilder:object:root=true @@ -139,12 +137,12 @@ type AWSFargateProfile struct { } // GetConditions returns the observations of the operational state of the AWSFargateProfile resource. -func (r *AWSFargateProfile) GetConditions() clusterv1.Conditions { +func (r *AWSFargateProfile) GetConditions() clusterv1beta1.Conditions { return r.Status.Conditions } -// SetConditions sets the underlying service state of the AWSFargateProfile to the predescribed clusterv1.Conditions. -func (r *AWSFargateProfile) SetConditions(conditions clusterv1.Conditions) { +// SetConditions sets the underlying service state of the AWSFargateProfile to the predescribed clusterv1beta1.Conditions. +func (r *AWSFargateProfile) SetConditions(conditions clusterv1beta1.Conditions) { r.Status.Conditions = conditions } diff --git a/exp/api/v1beta1/awsmachinepool_types.go b/exp/api/v1beta1/awsmachinepool_types.go index fc70422c03..c4061baf12 100644 --- a/exp/api/v1beta1/awsmachinepool_types.go +++ b/exp/api/v1beta1/awsmachinepool_types.go @@ -21,7 +21,7 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) // Constants block. @@ -116,7 +116,7 @@ type AWSMachinePoolStatus struct { // Conditions defines current service state of the AWSMachinePool. // +optional - Conditions clusterv1.Conditions `json:"conditions,omitempty"` + Conditions clusterv1beta1.Conditions `json:"conditions,omitempty"` // Instances contains the status for each instance in the pool // +optional @@ -215,12 +215,12 @@ func init() { } // GetConditions returns the observations of the operational state of the AWSMachinePool resource. -func (r *AWSMachinePool) GetConditions() clusterv1.Conditions { +func (r *AWSMachinePool) GetConditions() clusterv1beta1.Conditions { return r.Status.Conditions } -// SetConditions sets the underlying service state of the AWSMachinePool to the predescribed clusterv1.Conditions. -func (r *AWSMachinePool) SetConditions(conditions clusterv1.Conditions) { +// SetConditions sets the underlying service state of the AWSMachinePool to the predescribed clusterv1beta1.Conditions.
+func (r *AWSMachinePool) SetConditions(conditions clusterv1beta1.Conditions) { r.Status.Conditions = conditions } diff --git a/exp/api/v1beta1/awsmanagedmachinepool_types.go b/exp/api/v1beta1/awsmanagedmachinepool_types.go index bd9632f95b..08fdd1d0c9 100644 --- a/exp/api/v1beta1/awsmanagedmachinepool_types.go +++ b/exp/api/v1beta1/awsmanagedmachinepool_types.go @@ -23,7 +23,7 @@ import ( infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" iamv1 "sigs.k8s.io/cluster-api-provider-aws/v2/iam/api/v1beta1" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) // ManagedMachineAMIType specifies which AWS AMI to use for a managed MachinePool. @@ -52,12 +52,10 @@ const ( ManagedMachinePoolCapacityTypeSpot ManagedMachinePoolCapacityType = "spot" ) -var ( - // DefaultEKSNodegroupRole is the name of the default IAM role to use for EKS nodegroups - // if no other role is supplied in the spec and if iam role creation is not enabled. The default - // can be created using clusterawsadm or created manually. - DefaultEKSNodegroupRole = fmt.Sprintf("eks-nodegroup%s", iamv1.DefaultNameSuffix) -) +// DefaultEKSNodegroupRole is the name of the default IAM role to use for EKS nodegroups +// if no other role is supplied in the spec and if iam role creation is not enabled. The default +// can be created using clusterawsadm or created manually. +var DefaultEKSNodegroupRole = fmt.Sprintf("eks-nodegroup%s", iamv1.DefaultNameSuffix) // AWSManagedMachinePoolSpec defines the desired state of AWSManagedMachinePool. type AWSManagedMachinePoolSpec struct { @@ -233,7 +231,7 @@ type AWSManagedMachinePoolStatus struct { // Conditions defines current service state of the managed machine pool // +optional - Conditions clusterv1.Conditions `json:"conditions,omitempty"` + Conditions clusterv1beta1.Conditions `json:"conditions,omitempty"` } // +kubebuilder:object:root=true @@ -253,12 +251,12 @@ type AWSManagedMachinePool struct { } // GetConditions returns the observations of the operational state of the AWSManagedMachinePool resource. -func (r *AWSManagedMachinePool) GetConditions() clusterv1.Conditions { +func (r *AWSManagedMachinePool) GetConditions() clusterv1beta1.Conditions { return r.Status.Conditions } -// SetConditions sets the underlying service state of the AWSManagedMachinePool to the predescribed clusterv1.Conditions. -func (r *AWSManagedMachinePool) SetConditions(conditions clusterv1.Conditions) { +// SetConditions sets the underlying service state of the AWSManagedMachinePool to the predescribed clusterv1beta1.Conditions. +func (r *AWSManagedMachinePool) SetConditions(conditions clusterv1beta1.Conditions) { r.Status.Conditions = conditions } diff --git a/exp/api/v1beta1/conditions_consts.go b/exp/api/v1beta1/conditions_consts.go index 534ebb2bf9..0a9f7de1d0 100644 --- a/exp/api/v1beta1/conditions_consts.go +++ b/exp/api/v1beta1/conditions_consts.go @@ -16,11 +16,11 @@ limitations under the License. package v1beta1 -import clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" +import clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" const ( // ASGReadyCondition reports on current status of the autoscaling group. Ready indicates the group is provisioned. - ASGReadyCondition clusterv1.ConditionType = "ASGReady" + ASGReadyCondition clusterv1beta1.ConditionType = "ASGReady" // ASGNotFoundReason used when the autoscaling group couldn't be retrieved. 
ASGNotFoundReason = "ASGNotFound" // ASGProvisionFailedReason used for failures during autoscaling group provisioning. @@ -29,7 +29,7 @@ const ( ASGDeletionInProgress = "ASGDeletionInProgress" // LaunchTemplateReadyCondition represents the status of an AWSMachinePool's associated Launch Template. - LaunchTemplateReadyCondition clusterv1.ConditionType = "LaunchTemplateReady" + LaunchTemplateReadyCondition clusterv1beta1.ConditionType = "LaunchTemplateReady" // LaunchTemplateNotFoundReason is used when an associated Launch Template can't be found. LaunchTemplateNotFoundReason = "LaunchTemplateNotFound" // LaunchTemplateCreateFailedReason used for failures during Launch Template creation. @@ -38,9 +38,9 @@ const ( LaunchTemplateReconcileFailedReason = "LaunchTemplateReconcileFailed" // PreLaunchTemplateUpdateCheckCondition reports if all prerequisite are met for launch template update. - PreLaunchTemplateUpdateCheckCondition clusterv1.ConditionType = "PreLaunchTemplateUpdateCheckSuccess" + PreLaunchTemplateUpdateCheckCondition clusterv1beta1.ConditionType = "PreLaunchTemplateUpdateCheckSuccess" // PostLaunchTemplateUpdateOperationCondition reports on successfully completes post launch template update operation. - PostLaunchTemplateUpdateOperationCondition clusterv1.ConditionType = "PostLaunchTemplateUpdateOperationSuccess" + PostLaunchTemplateUpdateOperationCondition clusterv1beta1.ConditionType = "PostLaunchTemplateUpdateOperationSuccess" // PreLaunchTemplateUpdateCheckFailedReason used to report when not all prerequisite are met for launch template update. PreLaunchTemplateUpdateCheckFailedReason = "PreLaunchTemplateUpdateCheckFailed" @@ -48,7 +48,7 @@ const ( PostLaunchTemplateUpdateOperationFailedReason = "PostLaunchTemplateUpdateOperationFailed" // InstanceRefreshStartedCondition reports on successfully starting instance refresh. - InstanceRefreshStartedCondition clusterv1.ConditionType = "InstanceRefreshStarted" + InstanceRefreshStartedCondition clusterv1beta1.ConditionType = "InstanceRefreshStarted" // InstanceRefreshNotReadyReason used to report instance refresh is not initiated. // If there are instance refreshes that are in progress, then a new instance refresh request will fail. InstanceRefreshNotReadyReason = "InstanceRefreshNotReady" @@ -58,7 +58,7 @@ const ( const ( // EKSNodegroupReadyCondition condition reports on the successful reconciliation of eks control plane. - EKSNodegroupReadyCondition clusterv1.ConditionType = "EKSNodegroupReady" + EKSNodegroupReadyCondition clusterv1beta1.ConditionType = "EKSNodegroupReady" // EKSNodegroupReconciliationFailedReason used to report failures while reconciling EKS control plane. EKSNodegroupReconciliationFailedReason = "EKSNodegroupReconciliationFailed" // WaitingForEKSControlPlaneReason used when the machine pool is waiting for @@ -68,10 +68,10 @@ const ( const ( // EKSFargateProfileReadyCondition condition reports on the successful reconciliation of eks control plane. - EKSFargateProfileReadyCondition clusterv1.ConditionType = "EKSFargateProfileReady" + EKSFargateProfileReadyCondition clusterv1beta1.ConditionType = "EKSFargateProfileReady" // EKSFargateCreatingCondition condition reports on whether the fargate // profile is creating. - EKSFargateCreatingCondition clusterv1.ConditionType = "EKSFargateCreating" + EKSFargateCreatingCondition clusterv1beta1.ConditionType = "EKSFargateCreating" // EKSFargateDeletingCondition used to report that the profile is deleting. 
EKSFargateDeletingCondition = "EKSFargateDeleting" // EKSFargateReconciliationFailedReason used to report failures while reconciling EKS control plane. @@ -91,13 +91,13 @@ const ( const ( // IAMNodegroupRolesReadyCondition condition reports on the successful // reconciliation of EKS nodegroup iam roles. - IAMNodegroupRolesReadyCondition clusterv1.ConditionType = "IAMNodegroupRolesReady" + IAMNodegroupRolesReadyCondition clusterv1beta1.ConditionType = "IAMNodegroupRolesReady" // IAMNodegroupRolesReconciliationFailedReason used to report failures while // reconciling EKS nodegroup iam roles. IAMNodegroupRolesReconciliationFailedReason = "IAMNodegroupRolesReconciliationFailed" // IAMFargateRolesReadyCondition condition reports on the successful // reconciliation of EKS nodegroup iam roles. - IAMFargateRolesReadyCondition clusterv1.ConditionType = "IAMFargateRolesReady" + IAMFargateRolesReadyCondition clusterv1beta1.ConditionType = "IAMFargateRolesReady" // IAMFargateRolesReconciliationFailedReason used to report failures while // reconciling EKS nodegroup iam roles. IAMFargateRolesReconciliationFailedReason = "IAMFargateRolesReconciliationFailed" diff --git a/exp/api/v1beta1/conversion.go b/exp/api/v1beta1/conversion.go index cf4040a456..7b49ed9f64 100644 --- a/exp/api/v1beta1/conversion.go +++ b/exp/api/v1beta1/conversion.go @@ -20,8 +20,6 @@ import ( apiconversion "k8s.io/apimachinery/pkg/conversion" "sigs.k8s.io/controller-runtime/pkg/conversion" - infrav1beta1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta1" - infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2" utilconversion "sigs.k8s.io/cluster-api/util/conversion" ) @@ -149,6 +147,10 @@ func (src *AWSManagedMachinePool) ConvertTo(dstRaw conversion.Hub) error { dst.Spec.RolePath = restored.Spec.RolePath dst.Spec.RolePermissionsBoundary = restored.Spec.RolePermissionsBoundary + if restored.Spec.NodeRepairConfig != nil { + dst.Spec.NodeRepairConfig = restored.Spec.NodeRepairConfig + } + return nil } @@ -230,26 +232,6 @@ func (r *AWSFargateProfileList) ConvertFrom(srcRaw conversion.Hub) error { return Convert_v1beta2_AWSFargateProfileList_To_v1beta1_AWSFargateProfileList(src, r, nil) } -// Convert_v1beta1_AMIReference_To_v1beta2_AMIReference converts the v1beta1 AMIReference receiver to a v1beta2 AMIReference. -func Convert_v1beta1_AMIReference_To_v1beta2_AMIReference(in *infrav1beta1.AMIReference, out *infrav1.AMIReference, s apiconversion.Scope) error { - return infrav1beta1.Convert_v1beta1_AMIReference_To_v1beta2_AMIReference(in, out, s) -} - -// Convert_v1beta2_AMIReference_To_v1beta1_AMIReference converts the v1beta2 AMIReference receiver to a v1beta1 AMIReference. -func Convert_v1beta2_AMIReference_To_v1beta1_AMIReference(in *infrav1.AMIReference, out *infrav1beta1.AMIReference, s apiconversion.Scope) error { - return infrav1beta1.Convert_v1beta2_AMIReference_To_v1beta1_AMIReference(in, out, s) -} - -// Convert_v1beta2_Instance_To_v1beta1_Instance is a conversion function. -func Convert_v1beta2_Instance_To_v1beta1_Instance(in *infrav1.Instance, out *infrav1beta1.Instance, s apiconversion.Scope) error { - return infrav1beta1.Convert_v1beta2_Instance_To_v1beta1_Instance(in, out, s) -} - -// Convert_v1beta1_Instance_To_v1beta2_Instance is a conversion function. 
-func Convert_v1beta1_Instance_To_v1beta2_Instance(in *infrav1beta1.Instance, out *infrav1.Instance, s apiconversion.Scope) error { - return infrav1beta1.Convert_v1beta1_Instance_To_v1beta2_Instance(in, out, s) -} - // Convert_v1beta2_AWSLaunchTemplate_To_v1beta1_AWSLaunchTemplate converts the v1beta2 AWSLaunchTemplate receiver to a v1beta1 AWSLaunchTemplate. func Convert_v1beta2_AWSLaunchTemplate_To_v1beta1_AWSLaunchTemplate(in *expinfrav1.AWSLaunchTemplate, out *AWSLaunchTemplate, s apiconversion.Scope) error { return autoConvert_v1beta2_AWSLaunchTemplate_To_v1beta1_AWSLaunchTemplate(in, out, s) diff --git a/exp/api/v1beta1/zz_generated.conversion.go b/exp/api/v1beta1/zz_generated.conversion.go index 933a08f716..e825e20a31 100644 --- a/exp/api/v1beta1/zz_generated.conversion.go +++ b/exp/api/v1beta1/zz_generated.conversion.go @@ -28,7 +28,7 @@ import ( runtime "k8s.io/apimachinery/pkg/runtime" apiv1beta2 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" v1beta2 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2" - apiv1beta1 "sigs.k8s.io/cluster-api/api/v1beta1" + corev1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) func init() { @@ -63,11 +63,6 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta2.AWSLaunchTemplate)(nil), (*AWSLaunchTemplate)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_AWSLaunchTemplate_To_v1beta1_AWSLaunchTemplate(a.(*v1beta2.AWSLaunchTemplate), b.(*AWSLaunchTemplate), scope) - }); err != nil { - return err - } if err := s.AddGeneratedConversionFunc((*AWSMachinePool)(nil), (*v1beta2.AWSMachinePool)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1beta1_AWSMachinePool_To_v1beta2_AWSMachinePool(a.(*AWSMachinePool), b.(*v1beta2.AWSMachinePool), scope) }); err != nil { @@ -103,21 +98,11 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta2.AWSMachinePoolSpec)(nil), (*AWSMachinePoolSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_AWSMachinePoolSpec_To_v1beta1_AWSMachinePoolSpec(a.(*v1beta2.AWSMachinePoolSpec), b.(*AWSMachinePoolSpec), scope) - }); err != nil { - return err - } if err := s.AddGeneratedConversionFunc((*AWSMachinePoolStatus)(nil), (*v1beta2.AWSMachinePoolStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1beta1_AWSMachinePoolStatus_To_v1beta2_AWSMachinePoolStatus(a.(*AWSMachinePoolStatus), b.(*v1beta2.AWSMachinePoolStatus), scope) }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta2.AWSMachinePoolStatus)(nil), (*AWSMachinePoolStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_AWSMachinePoolStatus_To_v1beta1_AWSMachinePoolStatus(a.(*v1beta2.AWSMachinePoolStatus), b.(*AWSMachinePoolStatus), scope) - }); err != nil { - return err - } if err := s.AddGeneratedConversionFunc((*AWSManagedMachinePool)(nil), (*v1beta2.AWSManagedMachinePool)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1beta1_AWSManagedMachinePool_To_v1beta2_AWSManagedMachinePool(a.(*AWSManagedMachinePool), b.(*v1beta2.AWSManagedMachinePool), scope) }); err != nil { @@ -143,11 +128,6 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta2.AWSManagedMachinePoolSpec)(nil), 
(*AWSManagedMachinePoolSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_AWSManagedMachinePoolSpec_To_v1beta1_AWSManagedMachinePoolSpec(a.(*v1beta2.AWSManagedMachinePoolSpec), b.(*AWSManagedMachinePoolSpec), scope) - }); err != nil { - return err - } if err := s.AddGeneratedConversionFunc((*AWSManagedMachinePoolStatus)(nil), (*v1beta2.AWSManagedMachinePoolStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1beta1_AWSManagedMachinePoolStatus_To_v1beta2_AWSManagedMachinePoolStatus(a.(*AWSManagedMachinePoolStatus), b.(*v1beta2.AWSManagedMachinePoolStatus), scope) }); err != nil { @@ -163,11 +143,6 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta2.AutoScalingGroup)(nil), (*AutoScalingGroup)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_AutoScalingGroup_To_v1beta1_AutoScalingGroup(a.(*v1beta2.AutoScalingGroup), b.(*AutoScalingGroup), scope) - }); err != nil { - return err - } if err := s.AddGeneratedConversionFunc((*BlockDeviceMapping)(nil), (*v1beta2.BlockDeviceMapping)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1beta1_BlockDeviceMapping_To_v1beta2_BlockDeviceMapping(a.(*BlockDeviceMapping), b.(*v1beta2.BlockDeviceMapping), scope) }); err != nil { @@ -193,11 +168,6 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta2.FargateProfileSpec)(nil), (*FargateProfileSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_FargateProfileSpec_To_v1beta1_FargateProfileSpec(a.(*v1beta2.FargateProfileSpec), b.(*FargateProfileSpec), scope) - }); err != nil { - return err - } if err := s.AddGeneratedConversionFunc((*FargateProfileStatus)(nil), (*v1beta2.FargateProfileStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1beta1_FargateProfileStatus_To_v1beta2_FargateProfileStatus(a.(*FargateProfileStatus), b.(*v1beta2.FargateProfileStatus), scope) }); err != nil { @@ -273,11 +243,6 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta2.RefreshPreferences)(nil), (*RefreshPreferences)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_RefreshPreferences_To_v1beta1_RefreshPreferences(a.(*v1beta2.RefreshPreferences), b.(*RefreshPreferences), scope) - }); err != nil { - return err - } if err := s.AddGeneratedConversionFunc((*Taint)(nil), (*v1beta2.Taint)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1beta1_Taint_To_v1beta2_Taint(a.(*Taint), b.(*v1beta2.Taint), scope) }); err != nil { @@ -298,11 +263,45 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } + if err := s.AddConversionFunc((*v1beta2.AWSLaunchTemplate)(nil), (*AWSLaunchTemplate)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_AWSLaunchTemplate_To_v1beta1_AWSLaunchTemplate(a.(*v1beta2.AWSLaunchTemplate), b.(*AWSLaunchTemplate), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*v1beta2.AWSMachinePoolSpec)(nil), (*AWSMachinePoolSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_AWSMachinePoolSpec_To_v1beta1_AWSMachinePoolSpec(a.(*v1beta2.AWSMachinePoolSpec), b.(*AWSMachinePoolSpec), scope) + }); err 
!= nil { + return err + } + if err := s.AddConversionFunc((*v1beta2.AWSMachinePoolStatus)(nil), (*AWSMachinePoolStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_AWSMachinePoolStatus_To_v1beta1_AWSMachinePoolStatus(a.(*v1beta2.AWSMachinePoolStatus), b.(*AWSMachinePoolStatus), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*v1beta2.AWSManagedMachinePoolSpec)(nil), (*AWSManagedMachinePoolSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_AWSManagedMachinePoolSpec_To_v1beta1_AWSManagedMachinePoolSpec(a.(*v1beta2.AWSManagedMachinePoolSpec), b.(*AWSManagedMachinePoolSpec), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*v1beta2.AutoScalingGroup)(nil), (*AutoScalingGroup)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_AutoScalingGroup_To_v1beta1_AutoScalingGroup(a.(*v1beta2.AutoScalingGroup), b.(*AutoScalingGroup), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*v1beta2.FargateProfileSpec)(nil), (*FargateProfileSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_FargateProfileSpec_To_v1beta1_FargateProfileSpec(a.(*v1beta2.FargateProfileSpec), b.(*FargateProfileSpec), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*v1beta2.RefreshPreferences)(nil), (*RefreshPreferences)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_RefreshPreferences_To_v1beta1_RefreshPreferences(a.(*v1beta2.RefreshPreferences), b.(*RefreshPreferences), scope) + }); err != nil { + return err + } return nil } func autoConvert_v1beta1_AWSFargateProfile_To_v1beta2_AWSFargateProfile(in *AWSFargateProfile, out *v1beta2.AWSFargateProfile, s conversion.Scope) error { - out.TypeMeta = in.TypeMeta out.ObjectMeta = in.ObjectMeta if err := Convert_v1beta1_FargateProfileSpec_To_v1beta2_FargateProfileSpec(&in.Spec, &out.Spec, s); err != nil { return err @@ -319,7 +318,6 @@ func Convert_v1beta1_AWSFargateProfile_To_v1beta2_AWSFargateProfile(in *AWSFarga } func autoConvert_v1beta2_AWSFargateProfile_To_v1beta1_AWSFargateProfile(in *v1beta2.AWSFargateProfile, out *AWSFargateProfile, s conversion.Scope) error { - out.TypeMeta = in.TypeMeta out.ObjectMeta = in.ObjectMeta if err := Convert_v1beta2_FargateProfileSpec_To_v1beta1_FargateProfileSpec(&in.Spec, &out.Spec, s); err != nil { return err @@ -336,7 +334,6 @@ func Convert_v1beta2_AWSFargateProfile_To_v1beta1_AWSFargateProfile(in *v1beta2. 
} func autoConvert_v1beta1_AWSFargateProfileList_To_v1beta2_AWSFargateProfileList(in *AWSFargateProfileList, out *v1beta2.AWSFargateProfileList, s conversion.Scope) error { - out.TypeMeta = in.TypeMeta out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items @@ -358,7 +355,6 @@ func Convert_v1beta1_AWSFargateProfileList_To_v1beta2_AWSFargateProfileList(in * } func autoConvert_v1beta2_AWSFargateProfileList_To_v1beta1_AWSFargateProfileList(in *v1beta2.AWSFargateProfileList, out *AWSFargateProfileList, s conversion.Scope) error { - out.TypeMeta = in.TypeMeta out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items @@ -423,7 +419,6 @@ func autoConvert_v1beta2_AWSLaunchTemplate_To_v1beta1_AWSLaunchTemplate(in *v1be } func autoConvert_v1beta1_AWSMachinePool_To_v1beta2_AWSMachinePool(in *AWSMachinePool, out *v1beta2.AWSMachinePool, s conversion.Scope) error { - out.TypeMeta = in.TypeMeta out.ObjectMeta = in.ObjectMeta if err := Convert_v1beta1_AWSMachinePoolSpec_To_v1beta2_AWSMachinePoolSpec(&in.Spec, &out.Spec, s); err != nil { return err @@ -440,7 +435,6 @@ func Convert_v1beta1_AWSMachinePool_To_v1beta2_AWSMachinePool(in *AWSMachinePool } func autoConvert_v1beta2_AWSMachinePool_To_v1beta1_AWSMachinePool(in *v1beta2.AWSMachinePool, out *AWSMachinePool, s conversion.Scope) error { - out.TypeMeta = in.TypeMeta out.ObjectMeta = in.ObjectMeta if err := Convert_v1beta2_AWSMachinePoolSpec_To_v1beta1_AWSMachinePoolSpec(&in.Spec, &out.Spec, s); err != nil { return err @@ -479,7 +473,6 @@ func Convert_v1beta2_AWSMachinePoolInstanceStatus_To_v1beta1_AWSMachinePoolInsta } func autoConvert_v1beta1_AWSMachinePoolList_To_v1beta2_AWSMachinePoolList(in *AWSMachinePoolList, out *v1beta2.AWSMachinePoolList, s conversion.Scope) error { - out.TypeMeta = in.TypeMeta out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items @@ -501,7 +494,6 @@ func Convert_v1beta1_AWSMachinePoolList_To_v1beta2_AWSMachinePoolList(in *AWSMac } func autoConvert_v1beta2_AWSMachinePoolList_To_v1beta1_AWSMachinePoolList(in *v1beta2.AWSMachinePoolList, out *AWSMachinePoolList, s conversion.Scope) error { - out.TypeMeta = in.TypeMeta out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items @@ -587,7 +579,7 @@ func autoConvert_v1beta2_AWSMachinePoolSpec_To_v1beta1_AWSMachinePoolSpec(in *v1 func autoConvert_v1beta1_AWSMachinePoolStatus_To_v1beta2_AWSMachinePoolStatus(in *AWSMachinePoolStatus, out *v1beta2.AWSMachinePoolStatus, s conversion.Scope) error { out.Ready = in.Ready out.Replicas = in.Replicas - out.Conditions = *(*apiv1beta1.Conditions)(unsafe.Pointer(&in.Conditions)) + out.Conditions = *(*corev1beta1.Conditions)(unsafe.Pointer(&in.Conditions)) out.Instances = *(*[]v1beta2.AWSMachinePoolInstanceStatus)(unsafe.Pointer(&in.Instances)) out.LaunchTemplateID = in.LaunchTemplateID out.LaunchTemplateVersion = (*string)(unsafe.Pointer(in.LaunchTemplateVersion)) @@ -605,7 +597,7 @@ func Convert_v1beta1_AWSMachinePoolStatus_To_v1beta2_AWSMachinePoolStatus(in *AW func autoConvert_v1beta2_AWSMachinePoolStatus_To_v1beta1_AWSMachinePoolStatus(in *v1beta2.AWSMachinePoolStatus, out *AWSMachinePoolStatus, s conversion.Scope) error { out.Ready = in.Ready out.Replicas = in.Replicas - out.Conditions = *(*apiv1beta1.Conditions)(unsafe.Pointer(&in.Conditions)) + out.Conditions = *(*corev1beta1.Conditions)(unsafe.Pointer(&in.Conditions)) out.Instances = *(*[]AWSMachinePoolInstanceStatus)(unsafe.Pointer(&in.Instances)) out.LaunchTemplateID = in.LaunchTemplateID 
 	out.LaunchTemplateVersion = (*string)(unsafe.Pointer(in.LaunchTemplateVersion))
@@ -617,7 +609,6 @@ func autoConvert_v1beta2_AWSMachinePoolStatus_To_v1beta1_AWSMachinePoolStatus(in
 }
 
 func autoConvert_v1beta1_AWSManagedMachinePool_To_v1beta2_AWSManagedMachinePool(in *AWSManagedMachinePool, out *v1beta2.AWSManagedMachinePool, s conversion.Scope) error {
-	out.TypeMeta = in.TypeMeta
 	out.ObjectMeta = in.ObjectMeta
 	if err := Convert_v1beta1_AWSManagedMachinePoolSpec_To_v1beta2_AWSManagedMachinePoolSpec(&in.Spec, &out.Spec, s); err != nil {
 		return err
@@ -634,7 +625,6 @@ func Convert_v1beta1_AWSManagedMachinePool_To_v1beta2_AWSManagedMachinePool(in *
 }
 
 func autoConvert_v1beta2_AWSManagedMachinePool_To_v1beta1_AWSManagedMachinePool(in *v1beta2.AWSManagedMachinePool, out *AWSManagedMachinePool, s conversion.Scope) error {
-	out.TypeMeta = in.TypeMeta
 	out.ObjectMeta = in.ObjectMeta
 	if err := Convert_v1beta2_AWSManagedMachinePoolSpec_To_v1beta1_AWSManagedMachinePoolSpec(&in.Spec, &out.Spec, s); err != nil {
 		return err
@@ -651,7 +641,6 @@ func Convert_v1beta2_AWSManagedMachinePool_To_v1beta1_AWSManagedMachinePool(in *
 }
 
 func autoConvert_v1beta1_AWSManagedMachinePoolList_To_v1beta2_AWSManagedMachinePoolList(in *AWSManagedMachinePoolList, out *v1beta2.AWSManagedMachinePoolList, s conversion.Scope) error {
-	out.TypeMeta = in.TypeMeta
 	out.ListMeta = in.ListMeta
 	if in.Items != nil {
 		in, out := &in.Items, &out.Items
@@ -673,7 +662,6 @@ func Convert_v1beta1_AWSManagedMachinePoolList_To_v1beta2_AWSManagedMachinePoolL
 }
 
 func autoConvert_v1beta2_AWSManagedMachinePoolList_To_v1beta1_AWSManagedMachinePoolList(in *v1beta2.AWSManagedMachinePoolList, out *AWSManagedMachinePoolList, s conversion.Scope) error {
-	out.TypeMeta = in.TypeMeta
 	out.ListMeta = in.ListMeta
 	if in.Items != nil {
 		in, out := &in.Items, &out.Items
@@ -760,6 +748,7 @@ func autoConvert_v1beta2_AWSManagedMachinePoolSpec_To_v1beta1_AWSManagedMachineP
 		out.AWSLaunchTemplate = nil
 	}
 	// WARNING: in.AWSLifecycleHooks requires manual conversion: does not exist in peer-type
+	// WARNING: in.NodeRepairConfig requires manual conversion: does not exist in peer-type
 	return nil
 }
 
@@ -770,7 +759,7 @@ func autoConvert_v1beta1_AWSManagedMachinePoolStatus_To_v1beta2_AWSManagedMachin
 	out.LaunchTemplateVersion = (*string)(unsafe.Pointer(in.LaunchTemplateVersion))
 	out.FailureReason = (*string)(unsafe.Pointer(in.FailureReason))
 	out.FailureMessage = (*string)(unsafe.Pointer(in.FailureMessage))
-	out.Conditions = *(*apiv1beta1.Conditions)(unsafe.Pointer(&in.Conditions))
+	out.Conditions = *(*corev1beta1.Conditions)(unsafe.Pointer(&in.Conditions))
 	return nil
 }
 
@@ -786,7 +775,7 @@ func autoConvert_v1beta2_AWSManagedMachinePoolStatus_To_v1beta1_AWSManagedMachin
 	out.LaunchTemplateVersion = (*string)(unsafe.Pointer(in.LaunchTemplateVersion))
 	out.FailureReason = (*string)(unsafe.Pointer(in.FailureReason))
 	out.FailureMessage = (*string)(unsafe.Pointer(in.FailureMessage))
-	out.Conditions = *(*apiv1beta1.Conditions)(unsafe.Pointer(&in.Conditions))
+	out.Conditions = *(*corev1beta1.Conditions)(unsafe.Pointer(&in.Conditions))
 	return nil
 }
 
@@ -917,7 +906,7 @@ func autoConvert_v1beta1_FargateProfileStatus_To_v1beta2_FargateProfileStatus(in
 	out.Ready = in.Ready
 	out.FailureReason = (*string)(unsafe.Pointer(in.FailureReason))
 	out.FailureMessage = (*string)(unsafe.Pointer(in.FailureMessage))
-	out.Conditions = *(*apiv1beta1.Conditions)(unsafe.Pointer(&in.Conditions))
+	out.Conditions = *(*corev1beta1.Conditions)(unsafe.Pointer(&in.Conditions))
 	return nil
 }
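The Conditions assignments above avoid an element-by-element copy: because the old and new Conditions types are structurally identical, conversion-gen reinterprets the slice header in place instead of looping. A hedged illustration of that trick with two hypothetical, layout-identical types:

```go
package main

import (
	"fmt"
	"unsafe"
)

// Two distinct named types with identical memory layouts, standing in
// for the old apiv1beta1.Conditions and new corev1beta1.Conditions.
type OldCondition struct{ Type, Status string }
type NewCondition struct{ Type, Status string }

type OldConditions []OldCondition
type NewConditions []NewCondition

func main() {
	in := OldConditions{{Type: "Ready", Status: "True"}}
	// Reinterpret the slice header: no allocation, no copy. This is only
	// safe because the element types share the exact same layout, which
	// is why the generator audits such casts (see the G103 lint carve-out
	// for conversion files in .golangci.yml).
	out := *(*NewConditions)(unsafe.Pointer(&in))
	fmt.Println(out[0].Type, out[0].Status) // Ready True
}
```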
@@ -930,7 +919,7 @@ func autoConvert_v1beta2_FargateProfileStatus_To_v1beta1_FargateProfileStatus(in
 	out.Ready = in.Ready
 	out.FailureReason = (*string)(unsafe.Pointer(in.FailureReason))
 	out.FailureMessage = (*string)(unsafe.Pointer(in.FailureMessage))
-	out.Conditions = *(*apiv1beta1.Conditions)(unsafe.Pointer(&in.Conditions))
+	out.Conditions = *(*corev1beta1.Conditions)(unsafe.Pointer(&in.Conditions))
 	return nil
 }
diff --git a/exp/api/v1beta1/zz_generated.deepcopy.go b/exp/api/v1beta1/zz_generated.deepcopy.go
index da355ddf67..3919507c2d 100644
--- a/exp/api/v1beta1/zz_generated.deepcopy.go
+++ b/exp/api/v1beta1/zz_generated.deepcopy.go
@@ -23,7 +23,7 @@ package v1beta1
 import (
 	runtime "k8s.io/apimachinery/pkg/runtime"
 	"sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
-	apiv1beta1 "sigs.k8s.io/cluster-api/api/v1beta1"
+	corev1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
 )
 
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@@ -263,7 +263,7 @@ func (in *AWSMachinePoolStatus) DeepCopyInto(out *AWSMachinePoolStatus) {
 	*out = *in
 	if in.Conditions != nil {
 		in, out := &in.Conditions, &out.Conditions
-		*out = make(apiv1beta1.Conditions, len(*in))
+		*out = make(corev1beta1.Conditions, len(*in))
 		for i := range *in {
 			(*in)[i].DeepCopyInto(&(*out)[i])
 		}
@@ -490,7 +490,7 @@ func (in *AWSManagedMachinePoolStatus) DeepCopyInto(out *AWSManagedMachinePoolSt
 	}
 	if in.Conditions != nil {
 		in, out := &in.Conditions, &out.Conditions
-		*out = make(apiv1beta1.Conditions, len(*in))
+		*out = make(corev1beta1.Conditions, len(*in))
 		for i := range *in {
 			(*in)[i].DeepCopyInto(&(*out)[i])
 		}
@@ -632,7 +632,7 @@ func (in *FargateProfileStatus) DeepCopyInto(out *FargateProfileStatus) {
 	}
 	if in.Conditions != nil {
 		in, out := &in.Conditions, &out.Conditions
-		*out = make(apiv1beta1.Conditions, len(*in))
+		*out = make(corev1beta1.Conditions, len(*in))
 		for i := range *in {
 			(*in)[i].DeepCopyInto(&(*out)[i])
 		}
diff --git a/exp/api/v1beta2/awsfargateprofile_types.go b/exp/api/v1beta2/awsfargateprofile_types.go
index 3869fd42fa..453fd4b724 100644
--- a/exp/api/v1beta2/awsfargateprofile_types.go
+++ b/exp/api/v1beta2/awsfargateprofile_types.go
@@ -23,7 +23,7 @@ import (
 
 	infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
 	iamv1 "sigs.k8s.io/cluster-api-provider-aws/v2/iam/api/v1beta1"
-	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
+	clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
 )
 
 var (
@@ -142,7 +142,7 @@ type FargateProfileStatus struct {
 
 	// Conditions defines current state of the Fargate profile.
 	// +optional
-	Conditions clusterv1.Conditions `json:"conditions,omitempty"`
+	Conditions clusterv1beta1.Conditions `json:"conditions,omitempty"`
 }
 
 // +kubebuilder:object:root=true
@@ -163,12 +163,12 @@ type AWSFargateProfile struct {
 }
 
 // GetConditions returns the observations of the operational state of the AWSFargateProfile resource.
-func (r *AWSFargateProfile) GetConditions() clusterv1.Conditions {
+func (r *AWSFargateProfile) GetConditions() clusterv1beta1.Conditions {
 	return r.Status.Conditions
 }
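GetConditions/SetConditions are what let the shared condition helpers treat every CAPA resource uniformly: the helpers accept anything satisfying a getter/setter contract. A minimal sketch of that contract (the interface and helper names here are illustrative stand-ins, not the exact Cluster API ones):

```go
package main

import "fmt"

// Illustrative stand-ins for clusterv1beta1.Conditions and the
// conditions Setter interface from the Cluster API util packages.
type Conditions []string

type Setter interface {
	GetConditions() Conditions
	SetConditions(Conditions)
}

type FakePool struct{ conditions Conditions }

func (p *FakePool) GetConditions() Conditions  { return p.conditions }
func (p *FakePool) SetConditions(c Conditions) { p.conditions = c }

// markTrue works against any resource implementing Setter, which is
// why every type in this diff carries the two methods.
func markTrue(obj Setter, t string) {
	obj.SetConditions(append(obj.GetConditions(), t+"=True"))
}

func main() {
	p := &FakePool{}
	markTrue(p, "EKSFargateProfileReady")
	fmt.Println(p.GetConditions())
}
```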
-// SetConditions sets the underlying service state of the AWSFargateProfile to the predescribed clusterv1.Conditions.
-func (r *AWSFargateProfile) SetConditions(conditions clusterv1.Conditions) {
+// SetConditions sets the underlying service state of the AWSFargateProfile to the predescribed clusterv1beta1.Conditions.
+func (r *AWSFargateProfile) SetConditions(conditions clusterv1beta1.Conditions) {
 	r.Status.Conditions = conditions
 }
diff --git a/exp/api/v1beta2/awsfargateprofile_webhook.go b/exp/api/v1beta2/awsfargateprofile_webhook.go
index ed38ff73ae..f0cb893cb8 100644
--- a/exp/api/v1beta2/awsfargateprofile_webhook.go
+++ b/exp/api/v1beta2/awsfargateprofile_webhook.go
@@ -30,7 +30,7 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
 
 	"sigs.k8s.io/cluster-api-provider-aws/v2/pkg/eks"
-	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
+	clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
 )
 
 const (
@@ -53,8 +53,10 @@ func (r *AWSFargateProfile) SetupWebhookWithManager(mgr ctrl.Manager) error {
 
 type awsFargateProfileWebhook struct{}
 
-var _ webhook.CustomDefaulter = &awsFargateProfileWebhook{}
-var _ webhook.CustomValidator = &awsFargateProfileWebhook{}
+var (
+	_ webhook.CustomDefaulter = &awsFargateProfileWebhook{}
+	_ webhook.CustomValidator = &awsFargateProfileWebhook{}
+)
 
 // Default will set default values for the AWSFargateProfile.
 func (*awsFargateProfileWebhook) Default(_ context.Context, obj runtime.Object) error {
@@ -66,7 +68,7 @@ func (*awsFargateProfileWebhook) Default(_ context.Context, obj runtime.Object)
 	if r.Labels == nil {
 		r.Labels = make(map[string]string)
 	}
-	r.Labels[clusterv1.ClusterNameLabel] = r.Spec.ClusterName
+	r.Labels[clusterv1beta1.ClusterNameLabel] = r.Spec.ClusterName
 
 	if r.Spec.ProfileName == "" {
 		name, err := eks.GenerateEKSName(r.Name, r.Namespace, maxProfileNameLength)
diff --git a/exp/api/v1beta2/awsfargateprofile_webhook_test.go b/exp/api/v1beta2/awsfargateprofile_webhook_test.go
index 7849e0bb35..fa67c27418 100644
--- a/exp/api/v1beta2/awsfargateprofile_webhook_test.go
+++ b/exp/api/v1beta2/awsfargateprofile_webhook_test.go
@@ -27,7 +27,7 @@ import (
 	infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
 	"sigs.k8s.io/cluster-api-provider-aws/v2/pkg/eks"
 	utildefaulting "sigs.k8s.io/cluster-api-provider-aws/v2/util/defaulting"
-	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
+	clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
 )
 
 func TestAWSFargateProfileDefault(t *testing.T) {
@@ -40,7 +40,7 @@ func TestAWSFargateProfileDefault(t *testing.T) {
 	g := NewWithT(t)
 	err := (&awsFargateProfileWebhook{}).Default(context.Background(), fargate)
 	g.Expect(err).NotTo(HaveOccurred())
-	g.Expect(fargate.GetLabels()[clusterv1.ClusterNameLabel]).To(BeEquivalentTo(fargate.Spec.ClusterName))
+	g.Expect(fargate.GetLabels()[clusterv1beta1.ClusterNameLabel]).To(BeEquivalentTo(fargate.Spec.ClusterName))
 	name, err := eks.GenerateEKSName(fargate.Name, fargate.Namespace, maxProfileNameLength)
 	g.Expect(err).NotTo(HaveOccurred())
 	g.Expect(fargate.Spec.ProfileName).To(BeEquivalentTo(name))
diff --git a/exp/api/v1beta2/awsmachinepool_types.go b/exp/api/v1beta2/awsmachinepool_types.go
index ef0a219513..a5eb28ee20 100644
--- a/exp/api/v1beta2/awsmachinepool_types.go
+++ b/exp/api/v1beta2/awsmachinepool_types.go
@@ -23,7 +23,7 @@ import (
 	"k8s.io/apimachinery/pkg/runtime/schema"
 
 	infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
-	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
+	clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
 )
 
 // Constants block.
@@ -204,7 +204,7 @@ type AWSMachinePoolStatus struct {
 
 	// Conditions defines current service state of the AWSMachinePool.
 	// +optional
-	Conditions clusterv1.Conditions `json:"conditions,omitempty"`
+	Conditions clusterv1beta1.Conditions `json:"conditions,omitempty"`
 
 	// Instances contains the status for each instance in the pool
 	// +optional
@@ -306,12 +306,12 @@ func init() {
 }
 
 // GetConditions returns the observations of the operational state of the AWSMachinePool resource.
-func (r *AWSMachinePool) GetConditions() clusterv1.Conditions {
+func (r *AWSMachinePool) GetConditions() clusterv1beta1.Conditions {
 	return r.Status.Conditions
 }
 
-// SetConditions sets the underlying service state of the AWSMachinePool to the predescribed clusterv1.Conditions.
-func (r *AWSMachinePool) SetConditions(conditions clusterv1.Conditions) {
+// SetConditions sets the underlying service state of the AWSMachinePool to the predescribed clusterv1beta1.Conditions.
+func (r *AWSMachinePool) SetConditions(conditions clusterv1beta1.Conditions) {
 	r.Status.Conditions = conditions
 }
diff --git a/exp/api/v1beta2/awsmanagedmachinepool_types.go b/exp/api/v1beta2/awsmanagedmachinepool_types.go
index 0aeb7be0dc..8e761a506f 100644
--- a/exp/api/v1beta2/awsmanagedmachinepool_types.go
+++ b/exp/api/v1beta2/awsmanagedmachinepool_types.go
@@ -23,7 +23,7 @@ import (
 
 	infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
 	iamv1 "sigs.k8s.io/cluster-api-provider-aws/v2/iam/api/v1beta1"
-	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
+	clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
 )
 
 // ManagedMachineAMIType specifies which AWS AMI to use for a managed MachinePool.
@@ -214,6 +214,10 @@ type AWSManagedMachinePoolSpec struct {
 	// AWSLifecycleHooks specifies lifecycle hooks for the managed node group.
 	// +optional
 	AWSLifecycleHooks []AWSLifecycleHook `json:"lifecycleHooks,omitempty"`
+
+	// NodeRepairConfig specifies the node auto repair configuration for the managed node group.
+	// +optional
+	NodeRepairConfig *NodeRepairConfig `json:"nodeRepairConfig,omitempty"`
 }
 
 // ManagedMachinePoolScaling specifies scaling options.
@@ -294,7 +298,16 @@ type AWSManagedMachinePoolStatus struct {
 
 	// Conditions defines current service state of the managed machine pool
 	// +optional
-	Conditions clusterv1.Conditions `json:"conditions,omitempty"`
+	Conditions clusterv1beta1.Conditions `json:"conditions,omitempty"`
+}
+
+// NodeRepairConfig defines the node auto repair configuration for managed node groups.
+type NodeRepairConfig struct {
+	// Enabled specifies whether node auto repair is enabled for the node group.
+	// When enabled, EKS will automatically repair unhealthy nodes by replacing them.
+	// +optional
+	// +kubebuilder:default=false
+	Enabled *bool `json:"enabled,omitempty"`
 }
 
 // +kubebuilder:object:root=true
@@ -314,12 +327,12 @@ type AWSManagedMachinePool struct {
 }
 
 // GetConditions returns the observations of the operational state of the AWSManagedMachinePool resource.
-func (r *AWSManagedMachinePool) GetConditions() clusterv1.Conditions {
+func (r *AWSManagedMachinePool) GetConditions() clusterv1beta1.Conditions {
 	return r.Status.Conditions
 }
-// SetConditions sets the underlying service state of the AWSManagedMachinePool to the predescribed clusterv1.Conditions.
-func (r *AWSManagedMachinePool) SetConditions(conditions clusterv1.Conditions) {
+// SetConditions sets the underlying service state of the AWSManagedMachinePool to the predescribed clusterv1beta1.Conditions.
+func (r *AWSManagedMachinePool) SetConditions(conditions clusterv1beta1.Conditions) {
 	r.Status.Conditions = conditions
 }
diff --git a/exp/api/v1beta2/conditions_consts.go b/exp/api/v1beta2/conditions_consts.go
index 0f3d8675ca..7e656f93c5 100644
--- a/exp/api/v1beta2/conditions_consts.go
+++ b/exp/api/v1beta2/conditions_consts.go
@@ -16,11 +16,13 @@ limitations under the License.
 
 package v1beta2
 
-import clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
+import (
+	clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
+)
 
 const (
 	// ASGReadyCondition reports on current status of the autoscaling group. Ready indicates the group is provisioned.
-	ASGReadyCondition clusterv1.ConditionType = "ASGReady"
+	ASGReadyCondition clusterv1beta1.ConditionType = "ASGReady"
 	// ASGNotFoundReason used when the autoscaling group couldn't be retrieved.
 	ASGNotFoundReason = "ASGNotFound"
 	// ASGProvisionFailedReason used for failures during autoscaling group provisioning.
@@ -29,7 +31,7 @@ const (
 	ASGDeletionInProgress = "ASGDeletionInProgress"
 
 	// LaunchTemplateReadyCondition represents the status of an AWSMachinePool's associated Launch Template.
-	LaunchTemplateReadyCondition clusterv1.ConditionType = "LaunchTemplateReady"
+	LaunchTemplateReadyCondition clusterv1beta1.ConditionType = "LaunchTemplateReady"
 	// LaunchTemplateNotFoundReason is used when an associated Launch Template can't be found.
 	LaunchTemplateNotFoundReason = "LaunchTemplateNotFound"
 	// LaunchTemplateCreateFailedReason used for failures during Launch Template creation.
@@ -38,9 +40,9 @@ const (
 	LaunchTemplateReconcileFailedReason = "LaunchTemplateReconcileFailed"
 
 	// PreLaunchTemplateUpdateCheckCondition reports if all prerequisite are met for launch template update.
-	PreLaunchTemplateUpdateCheckCondition clusterv1.ConditionType = "PreLaunchTemplateUpdateCheckSuccess"
+	PreLaunchTemplateUpdateCheckCondition clusterv1beta1.ConditionType = "PreLaunchTemplateUpdateCheckSuccess"
 	// PostLaunchTemplateUpdateOperationCondition reports on successfully completes post launch template update operation.
-	PostLaunchTemplateUpdateOperationCondition clusterv1.ConditionType = "PostLaunchTemplateUpdateOperationSuccess"
+	PostLaunchTemplateUpdateOperationCondition clusterv1beta1.ConditionType = "PostLaunchTemplateUpdateOperationSuccess"
 
 	// PreLaunchTemplateUpdateCheckFailedReason used to report when not all prerequisite are met for launch template update.
 	PreLaunchTemplateUpdateCheckFailedReason = "PreLaunchTemplateUpdateCheckFailed"
@@ -48,7 +50,7 @@ const (
 	PostLaunchTemplateUpdateOperationFailedReason = "PostLaunchTemplateUpdateOperationFailed"
 
 	// InstanceRefreshStartedCondition reports on successfully starting instance refresh.
-	InstanceRefreshStartedCondition clusterv1.ConditionType = "InstanceRefreshStarted"
+	InstanceRefreshStartedCondition clusterv1beta1.ConditionType = "InstanceRefreshStarted"
 	// InstanceRefreshNotReadyReason used to report instance refresh is not initiated.
 	// If there are instance refreshes that are in progress, then a new instance refresh request will fail.
 	InstanceRefreshNotReadyReason = "InstanceRefreshNotReady"
@@ -60,7 +62,7 @@ const (
 	// AWSMachineDeletionFailed reports if deleting AWSMachines failed.
 	AWSMachineDeletionFailed = "AWSMachineDeletionFailed"
 	// LifecycleHookReadyCondition reports on the status of the lifecycle hook.
-	LifecycleHookReadyCondition clusterv1.ConditionType = "LifecycleHookReady"
+	LifecycleHookReadyCondition clusterv1beta1.ConditionType = "LifecycleHookReady"
 	// LifecycleHookCreationFailedReason used for failures during lifecycle hook creation.
 	LifecycleHookCreationFailedReason = "LifecycleHookCreationFailed"
 	// LifecycleHookUpdateFailedReason used for failures during lifecycle hook update.
@@ -71,7 +73,7 @@ const (
 
 const (
 	// EKSNodegroupReadyCondition condition reports on the successful reconciliation of eks control plane.
-	EKSNodegroupReadyCondition clusterv1.ConditionType = "EKSNodegroupReady"
+	EKSNodegroupReadyCondition clusterv1beta1.ConditionType = "EKSNodegroupReady"
 	// EKSNodegroupReconciliationFailedReason used to report failures while reconciling EKS control plane.
 	EKSNodegroupReconciliationFailedReason = "EKSNodegroupReconciliationFailed"
 	// WaitingForEKSControlPlaneReason used when the machine pool is waiting for
@@ -81,10 +83,10 @@ const (
 
 const (
 	// EKSFargateProfileReadyCondition condition reports on the successful reconciliation of eks control plane.
-	EKSFargateProfileReadyCondition clusterv1.ConditionType = "EKSFargateProfileReady"
+	EKSFargateProfileReadyCondition clusterv1beta1.ConditionType = "EKSFargateProfileReady"
 	// EKSFargateCreatingCondition condition reports on whether the fargate
 	// profile is creating.
-	EKSFargateCreatingCondition clusterv1.ConditionType = "EKSFargateCreating"
+	EKSFargateCreatingCondition clusterv1beta1.ConditionType = "EKSFargateCreating"
 	// EKSFargateDeletingCondition used to report that the profile is deleting.
 	EKSFargateDeletingCondition = "EKSFargateDeleting"
 	// EKSFargateReconciliationFailedReason used to report failures while reconciling EKS control plane.
@@ -104,13 +106,13 @@ const (
 const (
 	// IAMNodegroupRolesReadyCondition condition reports on the successful
 	// reconciliation of EKS nodegroup iam roles.
-	IAMNodegroupRolesReadyCondition clusterv1.ConditionType = "IAMNodegroupRolesReady"
+	IAMNodegroupRolesReadyCondition clusterv1beta1.ConditionType = "IAMNodegroupRolesReady"
 	// IAMNodegroupRolesReconciliationFailedReason used to report failures while
 	// reconciling EKS nodegroup iam roles.
 	IAMNodegroupRolesReconciliationFailedReason = "IAMNodegroupRolesReconciliationFailed"
 	// IAMFargateRolesReadyCondition condition reports on the successful
 	// reconciliation of EKS nodegroup iam roles.
-	IAMFargateRolesReadyCondition clusterv1.ConditionType = "IAMFargateRolesReady"
+	IAMFargateRolesReadyCondition clusterv1beta1.ConditionType = "IAMFargateRolesReady"
 	// IAMFargateRolesReconciliationFailedReason used to report failures while
 	// reconciling EKS nodegroup iam roles.
 	IAMFargateRolesReconciliationFailedReason = "IAMFargateRolesReconciliationFailed"
@@ -118,9 +120,9 @@ const (
 
 const (
 	// RosaMachinePoolReadyCondition condition reports on the successful reconciliation of rosa machinepool.
-	RosaMachinePoolReadyCondition clusterv1.ConditionType = "RosaMachinePoolReady"
+	RosaMachinePoolReadyCondition clusterv1beta1.ConditionType = "RosaMachinePoolReady"
 	// RosaMachinePoolUpgradingCondition condition reports whether ROSAMachinePool is upgrading or not.
-	RosaMachinePoolUpgradingCondition clusterv1.ConditionType = "RosaMachinePoolUpgrading"
+	RosaMachinePoolUpgradingCondition clusterv1beta1.ConditionType = "RosaMachinePoolUpgrading"
 	// WaitingForRosaControlPlaneReason used when the machine pool is waiting for
 	// ROSA control plane infrastructure to be ready before proceeding.
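These constants are typed as clusterv1beta1.ConditionType so they can feed the Cluster API condition machinery. A hedged sketch of what a helper such as MarkFalse ultimately records on a resource, built directly from the v1beta1 Condition shape (the reason/message values are illustrative):

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
)

func main() {
	// A failed ASG provision, expressed with the constants defined above.
	cond := clusterv1beta1.Condition{
		Type:               "ASGReady",           // ASGReadyCondition
		Status:             corev1.ConditionFalse,
		Severity:           clusterv1beta1.ConditionSeverityError,
		Reason:             "ASGProvisionFailed", // ASGProvisionFailedReason
		Message:            "CreateAutoScalingGroup failed (illustrative)",
		LastTransitionTime: metav1.Now(),
	}
	conditions := clusterv1beta1.Conditions{cond}
	fmt.Println(conditions[0].Type, conditions[0].Reason)
}
```

In controller code this would normally go through the deprecated v1beta1 helpers (aliased as v1beta1conditions in the .golangci.yml changes above) rather than being assembled by hand.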
@@ -129,3 +131,23 @@ const (
 	// RosaMachinePoolReconciliationFailedReason used to report failures while reconciling ROSAMachinePool.
 	RosaMachinePoolReconciliationFailedReason = "ReconciliationFailed"
 )
+
+const (
+	// ROSANetworkReadyCondition condition reports on the successful reconciliation of ROSANetwork.
+	ROSANetworkReadyCondition clusterv1beta1.ConditionType = "ROSANetworkReady"
+
+	// ROSANetworkCreatingReason used when ROSANetwork is being created.
+	ROSANetworkCreatingReason = "Creating"
+
+	// ROSANetworkCreatedReason used when ROSANetwork is created.
+	ROSANetworkCreatedReason = "Created"
+
+	// ROSANetworkFailedReason used when ROSANetwork creation failed.
+	ROSANetworkFailedReason = "Failed"
+
+	// ROSANetworkDeletingReason used when ROSANetwork is being deleted.
+	ROSANetworkDeletingReason = "Deleting"
+
+	// ROSANetworkDeletionFailedReason used to report failures while deleting ROSANetwork.
+	ROSANetworkDeletionFailedReason = "DeletionFailed"
+)
diff --git a/exp/api/v1beta2/finalizers.go b/exp/api/v1beta2/finalizers.go
index 1125449285..f0cffa7958 100644
--- a/exp/api/v1beta2/finalizers.go
+++ b/exp/api/v1beta2/finalizers.go
@@ -28,4 +28,7 @@ const (
 
 	// RosaMachinePoolFinalizer allows the controller to clean up resources on delete.
 	RosaMachinePoolFinalizer = "rosamachinepools.infrastructure.cluster.x-k8s.io"
+
+	// RosaRoleConfigFinalizer allows the controller to clean up resources on delete.
+	RosaRoleConfigFinalizer = "rosaroleconfigs.infrastructure.cluster.x-k8s.io"
 )
diff --git a/exp/api/v1beta2/rosacluster_types.go b/exp/api/v1beta2/rosacluster_types.go
index 3303125d1c..2b24e5f0e2 100644
--- a/exp/api/v1beta2/rosacluster_types.go
+++ b/exp/api/v1beta2/rosacluster_types.go
@@ -19,14 +19,14 @@ package v1beta2
 import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 
-	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
+	clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
 )
 
 // ROSAClusterSpec defines the desired state of ROSACluster.
 type ROSAClusterSpec struct {
 	// ControlPlaneEndpoint represents the endpoint used to communicate with the control plane.
 	// +optional
-	ControlPlaneEndpoint clusterv1.APIEndpoint `json:"controlPlaneEndpoint"`
+	ControlPlaneEndpoint clusterv1beta1.APIEndpoint `json:"controlPlaneEndpoint"`
 }
 
 // ROSAClusterStatus defines the observed state of ROSACluster.
@@ -37,11 +37,11 @@ type ROSAClusterStatus struct {
 
 	// FailureDomains specifies a list fo available availability zones that can be used
 	// +optional
-	FailureDomains clusterv1.FailureDomains `json:"failureDomains,omitempty"`
+	FailureDomains clusterv1beta1.FailureDomains `json:"failureDomains,omitempty"`
 
 	// Conditions defines current service state of the ROSACluster.
 	// +optional
-	Conditions clusterv1.Conditions `json:"conditions,omitempty"`
+	Conditions clusterv1beta1.Conditions `json:"conditions,omitempty"`
 }
 
 // +kubebuilder:object:root=true
@@ -72,13 +72,13 @@ type ROSAClusterList struct {
 }
 
 // GetConditions returns the observations of the operational state of the
 // ROSACluster resource.
-func (r *ROSACluster) GetConditions() clusterv1.Conditions {
+func (r *ROSACluster) GetConditions() clusterv1beta1.Conditions {
 	return r.Status.Conditions
 }
 // SetConditions sets the underlying service state of the ROSACluster to the
-// predescribed clusterv1.Conditions.
-func (r *ROSACluster) SetConditions(conditions clusterv1.Conditions) {
+// predescribed clusterv1beta1.Conditions.
+func (r *ROSACluster) SetConditions(conditions clusterv1beta1.Conditions) {
 	r.Status.Conditions = conditions
 }
diff --git a/exp/api/v1beta2/rosamachinepool_types.go b/exp/api/v1beta2/rosamachinepool_types.go
index 0dc3af30ed..9c379586c6 100644
--- a/exp/api/v1beta2/rosamachinepool_types.go
+++ b/exp/api/v1beta2/rosamachinepool_types.go
@@ -22,7 +22,8 @@ import (
 	"k8s.io/apimachinery/pkg/util/intstr"
 
 	infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
-	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
+	rosacontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/rosa/api/v1beta2"
+	clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
 )
 
 // RosaMachinePoolSpec defines the desired state of RosaMachinePool.
@@ -79,7 +80,7 @@ type RosaMachinePoolSpec struct {
 	// Autoscaling specifies auto scaling behaviour for this MachinePool.
 	// required if Replicas is not configured
 	// +optional
-	Autoscaling *RosaMachinePoolAutoScaling `json:"autoscaling,omitempty"`
+	Autoscaling *rosacontrolplanev1.AutoScaling `json:"autoscaling,omitempty"`
 
 	// TuningConfigs specifies the names of the tuning configs to be applied to this MachinePool.
 	// Tuning configs must already exist.
@@ -118,6 +119,12 @@ type RosaMachinePoolSpec struct {
 	//
 	// +optional
 	UpdateConfig *RosaUpdateConfig `json:"updateConfig,omitempty"`
+
+	// CapacityReservationID specifies the ID of an AWS On-Demand Capacity Reservation or of Capacity Blocks for ML.
+	// The CapacityReservationID must be created in advance, before creating a NodePool.
+	//
+	// +optional
+	CapacityReservationID string `json:"capacityReservationID,omitempty"`
 }
 
 // RosaTaint represents a taint to be applied to a node.
@@ -139,14 +146,6 @@ type RosaTaint struct {
 	Effect corev1.TaintEffect `json:"effect"`
 }
 
-// RosaMachinePoolAutoScaling specifies scaling options.
-type RosaMachinePoolAutoScaling struct {
-	// +kubebuilder:validation:Minimum=1
-	MinReplicas int `json:"minReplicas,omitempty"`
-	// +kubebuilder:validation:Minimum=1
-	MaxReplicas int `json:"maxReplicas,omitempty"`
-}
-
 // RosaUpdateConfig specifies update configuration
 type RosaUpdateConfig struct {
 	// RollingUpdate specifies MaxUnavailable & MaxSurge number of nodes during update.
@@ -207,7 +206,7 @@ type RosaMachinePoolStatus struct {
 	Replicas int32 `json:"replicas"`
 	// Conditions defines current service state of the managed machine pool
 	// +optional
-	Conditions clusterv1.Conditions `json:"conditions,omitempty"`
+	Conditions clusterv1beta1.Conditions `json:"conditions,omitempty"`
 	// FailureMessage will be set in the event that there is a terminal problem
 	// reconciling the state and will be set to a descriptive error message.
 	//
@@ -253,12 +252,12 @@ type ROSAMachinePoolList struct {
 }
 
 // GetConditions returns the observations of the operational state of the RosaMachinePool resource.
-func (r *ROSAMachinePool) GetConditions() clusterv1.Conditions {
+func (r *ROSAMachinePool) GetConditions() clusterv1beta1.Conditions {
 	return r.Status.Conditions
 }
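The spec now reuses the control-plane AutoScaling type in place of the deleted RosaMachinePoolAutoScaling, and gains CapacityReservationID. A hedged literal showing the new shape, with local mirror types (the real AutoScaling lives in controlplane/rosa/api/v1beta2 and is assumed here to carry the same MinReplicas/MaxReplicas fields as the removed type):

```go
package main

import "fmt"

// Local stand-in for rosacontrolplanev1.AutoScaling.
type AutoScaling struct {
	MinReplicas int
	MaxReplicas int
}

// Trimmed to the fields touched in this hunk.
type RosaMachinePoolSpec struct {
	NodePoolName          string
	Autoscaling           *AutoScaling
	CapacityReservationID string
}

func main() {
	spec := RosaMachinePoolSpec{
		NodePoolName: "workers",
		// Replicas is omitted, so Autoscaling is required.
		Autoscaling: &AutoScaling{MinReplicas: 2, MaxReplicas: 6},
		// Pre-created On-Demand Capacity Reservation (illustrative ID).
		CapacityReservationID: "cr-0123456789abcdef0",
	}
	fmt.Println(spec.Autoscaling.MaxReplicas, spec.CapacityReservationID)
}
```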
-// SetConditions sets the underlying service state of the RosaMachinePool to the predescribed clusterv1.Conditions.
-func (r *ROSAMachinePool) SetConditions(conditions clusterv1.Conditions) {
+// SetConditions sets the underlying service state of the RosaMachinePool to the predescribed clusterv1beta1.Conditions.
+func (r *ROSAMachinePool) SetConditions(conditions clusterv1beta1.Conditions) {
 	r.Status.Conditions = conditions
 }
diff --git a/exp/api/v1beta2/rosanetwork_types.go b/exp/api/v1beta2/rosanetwork_types.go
new file mode 100644
index 0000000000..0cb9922530
--- /dev/null
+++ b/exp/api/v1beta2/rosanetwork_types.go
@@ -0,0 +1,149 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta2
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+	infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
+	clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
+)
+
+// ROSANetworkFinalizer allows the controller to clean up resources on delete.
+const ROSANetworkFinalizer = "rosanetwork.infrastructure.cluster.x-k8s.io"
+
+// ROSANetworkSpec defines the desired state of ROSANetwork
+type ROSANetworkSpec struct {
+	// The name of the cloudformation stack under which the network infrastructure would be created
+	// +kubebuilder:validation:XValidation:rule="self == oldSelf", message="stackName is immutable"
+	// +kubebuilder:validation:Required
+	StackName string `json:"stackName"`
+
+	// The AWS region in which the components of the ROSA network infrastructure are to be created
+	// +kubebuilder:validation:XValidation:rule="self == oldSelf", message="region is immutable"
+	// +kubebuilder:validation:Required
+	Region string `json:"region"`
+
+	// The number of availability zones to be used for creation of the network infrastructure.
+	// You can specify anything between one and four, depending on the chosen AWS region.
+	// Either AvailabilityZoneCount OR AvailabilityZones must be set.
+	// +kubebuilder:validation:Minimum=1
+	// +kubebuilder:validation:XValidation:rule="self == oldSelf", message="availabilityZoneCount is immutable"
+	// +optional
+	AvailabilityZoneCount int `json:"availabilityZoneCount,omitempty"`
+
+	// The list of availability zones to be used for creation of the network infrastructure.
+	// You can specify anything between one and four valid availability zones from a given region.
+	// Either AvailabilityZones OR AvailabilityZoneCount must be set.
+	// +kubebuilder:validation:XValidation:rule="self == oldSelf", message="availabilityZones is immutable"
+	// +optional
+	AvailabilityZones []string `json:"availabilityZones,omitempty"`
+
+	// CIDR block to be used for the VPC
+	// +kubebuilder:validation:Format=cidr
+	// +kubebuilder:validation:Required
+	CIDRBlock string `json:"cidrBlock"`
+
+	// IdentityRef is a reference to an identity to be used when reconciling rosa network.
+	// If no identity is specified, the default identity for this controller will be used.
+	// +optional
+	IdentityRef *infrav1.AWSIdentityReference `json:"identityRef,omitempty"`
+
+	// StackTags is an optional set of tags to add to the created cloudformation stack.
+	// The stack tags will then be automatically applied to the supported AWS resources (VPC, subnets, ...).
+	// +optional
+	StackTags Tags `json:"stackTags,omitempty"`
+}
+
+// ROSANetworkSubnet groups the public and private subnets and the availability zone in which the two subnets were created
+type ROSANetworkSubnet struct {
+	// Availability zone of the subnet pair, for example us-west-2a
+	AvailabilityZone string `json:"availabilityZone"`
+
+	// ID of the public subnet, for example subnet-0f7e49a3ce68ff338
+	PublicSubnet string `json:"publicSubnet"`
+
+	// ID of the private subnet, for example subnet-07a20d6c41af2b725
+	PrivateSubnet string `json:"privateSubnet"`
+}
+
+// CFResource groups information pertaining to a resource created as a part of a cloudformation stack
+type CFResource struct {
+	// Type of the created resource: AWS::EC2::VPC, AWS::EC2::Subnet, ...
+	ResourceType string `json:"resource"`
+
+	// LogicalResourceID of the created resource.
+	LogicalID string `json:"logicalId"`
+
+	// PhysicalResourceID of the created resource.
+	PhysicalID string `json:"physicalId"`
+
+	// Status of the resource: CREATE_IN_PROGRESS, CREATE_COMPLETE, ...
+	Status string `json:"status"`
+
+	// Message pertaining to the status of the resource
+	Reason string `json:"reason"`
+}
+
+// ROSANetworkStatus defines the observed state of ROSANetwork
+type ROSANetworkStatus struct {
+	// Array of the created private and public subnets, grouped by availability zone
+	Subnets []ROSANetworkSubnet `json:"subnets,omitempty"`
+
+	// Resources created in the cloudformation stack
+	Resources []CFResource `json:"resources,omitempty"`
+
+	// Conditions specifies the conditions for ROSANetwork
+	Conditions clusterv1beta1.Conditions `json:"conditions,omitempty"`
+}
+
+// +kubebuilder:object:root=true
+// +kubebuilder:resource:path=rosanetworks,shortName=rosanet,scope=Namespaced,categories=cluster-api
+// +kubebuilder:storageversion
+// +kubebuilder:subresource:status
+
+// ROSANetwork is the schema for the rosanetworks API
+type ROSANetwork struct {
+	metav1.TypeMeta   `json:",inline"`
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+
+	Spec   ROSANetworkSpec   `json:"spec,omitempty"`
+	Status ROSANetworkStatus `json:"status,omitempty"`
+}
+
+// +kubebuilder:object:root=true
+
+// ROSANetworkList contains a list of ROSANetwork
+type ROSANetworkList struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ListMeta `json:"metadata,omitempty"`
+	Items           []ROSANetwork `json:"items"`
+}
+
+// GetConditions returns the observations of the operational state of the ROSANetwork resource.
+func (r *ROSANetwork) GetConditions() clusterv1beta1.Conditions {
+	return r.Status.Conditions
+}
+
+// SetConditions sets the underlying service state of the ROSANetwork to the predescribed clusterv1beta1.Conditions.
+func (r *ROSANetwork) SetConditions(conditions clusterv1beta1.Conditions) {
+	r.Status.Conditions = conditions
+}
+
+func init() {
+	SchemeBuilder.Register(&ROSANetwork{}, &ROSANetworkList{})
+}
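Putting the new type together: a hedged example of a minimal ROSANetwork, choosing the AvailabilityZoneCount form (the webhook below rejects setting both the count and the explicit zone list). It assumes a module that vendors this branch of cluster-api-provider-aws, since the type only exists as of this change:

```go
package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2"
)

func main() {
	net := expinfrav1.ROSANetwork{
		ObjectMeta: metav1.ObjectMeta{Name: "rosa-net", Namespace: "default"},
		Spec: expinfrav1.ROSANetworkSpec{
			StackName:             "rosa-net-stack", // immutable per the CEL rule
			Region:                "us-west-2",      // immutable per the CEL rule
			AvailabilityZoneCount: 3,                // mutually exclusive with AvailabilityZones
			CIDRBlock:             "10.0.0.0/16",
		},
	}
	fmt.Println(net.Spec.StackName, net.Spec.CIDRBlock)
}
```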
diff --git a/exp/api/v1beta2/rosanetwork_webhook.go b/exp/api/v1beta2/rosanetwork_webhook.go
new file mode 100644
index 0000000000..0465d892a2
--- /dev/null
+++ b/exp/api/v1beta2/rosanetwork_webhook.go
@@ -0,0 +1,73 @@
+package v1beta2
+
+import (
+	"context"
+	"fmt"
+
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
+	runtime "k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/util/validation/field"
+	ctrl "sigs.k8s.io/controller-runtime"
+	"sigs.k8s.io/controller-runtime/pkg/webhook"
+	"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
+)
+
+// SetupWebhookWithManager will setup the webhooks for the ROSANetwork.
+func (r *ROSANetwork) SetupWebhookWithManager(mgr ctrl.Manager) error {
+	w := new(rosaNetworkWebhook)
+	return ctrl.NewWebhookManagedBy(mgr).
+		For(r).
+		WithValidator(w).
+		WithDefaulter(w).
+		Complete()
+}
+
+// +kubebuilder:webhook:verbs=create;update,path=/validate-infrastructure-cluster-x-k8s-io-v1beta2-rosanetwork,mutating=false,failurePolicy=fail,matchPolicy=Equivalent,groups=infrastructure.cluster.x-k8s.io,resources=rosanetworks,versions=v1beta2,name=validation.rosanetwork.infrastructure.cluster.x-k8s.io,sideEffects=None,admissionReviewVersions=v1;v1beta1
+// +kubebuilder:webhook:verbs=create;update,path=/mutate-infrastructure-cluster-x-k8s-io-v1beta2-rosanetwork,mutating=true,failurePolicy=fail,matchPolicy=Equivalent,groups=infrastructure.cluster.x-k8s.io,resources=rosanetworks,versions=v1beta2,name=default.rosanetwork.infrastructure.cluster.x-k8s.io,sideEffects=None,admissionReviewVersions=v1;v1beta1
+
+type rosaNetworkWebhook struct{}
+
+var _ webhook.CustomDefaulter = &rosaNetworkWebhook{}
+var _ webhook.CustomValidator = &rosaNetworkWebhook{}
+
+// ValidateCreate implements admission.Validator.
+func (r *rosaNetworkWebhook) ValidateCreate(ctx context.Context, obj runtime.Object) (warnings admission.Warnings, err error) {
+	rosaNet, ok := obj.(*ROSANetwork)
+	if !ok {
+		return nil, fmt.Errorf("expected a ROSANetwork object but got %T", rosaNet)
+	}
+
+	var allErrs field.ErrorList
+	if rosaNet.Spec.AvailabilityZoneCount == 0 && len(rosaNet.Spec.AvailabilityZones) == 0 {
+		err := field.Invalid(field.NewPath("spec.AvailabilityZones"), rosaNet.Spec.AvailabilityZones, "Either AvailabilityZones OR AvailabilityZoneCount must be set.")
+		allErrs = append(allErrs, err)
+	}
+	if rosaNet.Spec.AvailabilityZoneCount != 0 && len(rosaNet.Spec.AvailabilityZones) > 0 {
+		err := field.Invalid(field.NewPath("spec.AvailabilityZones"), rosaNet.Spec.AvailabilityZones, "Only one of AvailabilityZones OR AvailabilityZoneCount can be set.")
+		allErrs = append(allErrs, err)
+	}
+
+	if len(allErrs) > 0 {
+		return nil, apierrors.NewInvalid(
+			rosaNet.GroupVersionKind().GroupKind(),
+			rosaNet.Name,
+			allErrs)
+	}
+
+	return nil, nil
+}
+
+// ValidateUpdate implements admission.Validator.
+func (r *rosaNetworkWebhook) ValidateUpdate(ctx context.Context, old runtime.Object, updated runtime.Object) (warnings admission.Warnings, err error) {
+	return nil, nil
+}
+
+// ValidateDelete implements admission.Validator.
+func (r *rosaNetworkWebhook) ValidateDelete(ctx context.Context, obj runtime.Object) (warnings admission.Warnings, err error) {
+	return nil, nil
+}
+
+// Default implements admission.Defaulter.
+func (r *rosaNetworkWebhook) Default(ctx context.Context, obj runtime.Object) error {
+	return nil
+}
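Because the webhook struct is stateless, the create-time validation above is easy to exercise directly. A hedged test sketch (a hypothetical file in the same package) driving ValidateCreate through the both-fields-set branch:

```go
package v1beta2

import (
	"context"
	"testing"
)

func TestROSANetworkValidateCreate_BothAZFieldsSet(t *testing.T) {
	w := &rosaNetworkWebhook{}
	net := &ROSANetwork{
		Spec: ROSANetworkSpec{
			StackName:             "stack",
			Region:                "us-west-2",
			CIDRBlock:             "10.0.0.0/16",
			AvailabilityZoneCount: 2,
			AvailabilityZones:     []string{"us-west-2a", "us-west-2b"},
		},
	}
	// Setting both the count and the explicit zone list must be rejected.
	if _, err := w.ValidateCreate(context.Background(), net); err == nil {
		t.Fatal("expected ValidateCreate to reject both AZ fields being set")
	}
}
```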
diff --git a/exp/api/v1beta2/rosaroleconfig_types.go b/exp/api/v1beta2/rosaroleconfig_types.go
new file mode 100644
index 0000000000..41e0c21add
--- /dev/null
+++ b/exp/api/v1beta2/rosaroleconfig_types.go
@@ -0,0 +1,248 @@
+/*
+Copyright 2025 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta2
+
+import (
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+	infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
+	rosacontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/rosa/api/v1beta2"
+	clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
+)
+
+// OidcProviderType set to Managed or Unmanaged
+type OidcProviderType string
+
+const (
+	// Managed OIDC Provider type
+	Managed OidcProviderType = "Managed"
+
+	// Unmanaged OIDC Provider type
+	Unmanaged OidcProviderType = "Unmanaged"
+)
+
+// Operator Role const
+const (
+	// IngressOperatorARNSuffix is the suffix for the ingress operator role.
+	IngressOperatorARNSuffix = "-openshift-ingress-operator-cloud-credentials"
+
+	// ImageRegistryARNSuffix is the suffix for the image registry operator role.
+	ImageRegistryARNSuffix = "-openshift-image-registry-installer-cloud-credentials"
+
+	// StorageARNSuffix is the suffix for the storage operator role.
+	StorageARNSuffix = "-openshift-cluster-csi-drivers-ebs-cloud-credentials"
+
+	// NetworkARNSuffix is the suffix for the network operator role.
+	NetworkARNSuffix = "-openshift-cloud-network-config-controller-cloud-credentials"
+
+	// KubeCloudControllerARNSuffix is the suffix for the kube cloud controller role.
+	KubeCloudControllerARNSuffix = "-kube-system-kube-controller-manager"
+
+	// NodePoolManagementARNSuffix is the suffix for the node pool management role.
+	NodePoolManagementARNSuffix = "-kube-system-capa-controller-manager"
+
+	// ControlPlaneOperatorARNSuffix is the suffix for the control plane operator role.
+	ControlPlaneOperatorARNSuffix = "-kube-system-control-plane-operator"
+
+	// KMSProviderARNSuffix is the suffix for the kms provider role.
+	KMSProviderARNSuffix = "-kube-system-kms-provider"
+)
+
+// Account Role const
+const (
+	// HCPROSAInstallerRole is the suffix for installer account role
+	HCPROSAInstallerRole = "-HCP-ROSA-Installer-Role"
+
+	// HCPROSASupportRole is the suffix for support account role
+	HCPROSASupportRole = "-HCP-ROSA-Support-Role"
+
+	// HCPROSAWorkerRole is the suffix for worker account role
+	HCPROSAWorkerRole = "-HCP-ROSA-Worker-Role"
+)
+
+const (
+	// RosaRoleConfigReadyCondition condition reports on the successful reconciliation of RosaRoleConfig.
+	RosaRoleConfigReadyCondition = "RosaRoleConfigReady"
+
+	// RosaRoleConfigDeletionFailedReason used to report failures while deleting RosaRoleConfig.
+	RosaRoleConfigDeletionFailedReason = "DeletionFailed"
+
+	// RosaRoleConfigReconciliationFailedReason used to report reconciliation failures.
+	RosaRoleConfigReconciliationFailedReason = "ReconciliationFailed"
+
+	// RosaRoleConfigDeletionStarted used to indicate that the deletion of RosaRoleConfig has started.
+	RosaRoleConfigDeletionStarted = "DeletionStarted"
+
+	// RosaRoleConfigCreatedReason used to indicate that the RosaRoleConfig has been created.
+	RosaRoleConfigCreatedReason = "Created"
+)
+
+// ROSARoleConfigSpec defines the desired state of ROSARoleConfig
+type ROSARoleConfigSpec struct {
+	// AccountRoleConfig defines account-wide IAM roles before creating your ROSA cluster.
+	AccountRoleConfig AccountRoleConfig `json:"accountRoleConfig"`
+
+	// OperatorRoleConfig defines cluster-specific operator IAM roles based on your cluster configuration.
+	OperatorRoleConfig OperatorRoleConfig `json:"operatorRoleConfig"`
+
+	// IdentityRef is a reference to an identity to be used when reconciling the ROSA Role Config.
+	// If no identity is specified, the default identity for this controller will be used.
+	// +optional
+	IdentityRef *infrav1.AWSIdentityReference `json:"identityRef,omitempty"`
+
+	// CredentialsSecretRef references a secret with necessary credentials to connect to the OCM API.
+	// +optional
+	CredentialsSecretRef *corev1.LocalObjectReference `json:"credentialsSecretRef,omitempty"`
+
+	// OIDC provider type values are Managed or Unmanaged. When set to Unmanaged, the OperatorRoleConfig OIDCID field must be provided.
+	// +kubebuilder:validation:Enum=Managed;Unmanaged
+	// +kubebuilder:default=Managed
+	OidcProviderType OidcProviderType `json:"oidcProviderType"`
+}
+
+// AccountRoleConfig defines account IAM roles before creating your ROSA cluster.
+type AccountRoleConfig struct {
+	// User-defined prefix for all generated AWS account roles
+	// +kubebuilder:validation:Required
+	// +kubebuilder:validation:MaxLength:=4
+	// +kubebuilder:validation:Pattern:=`^[a-z]([-a-z0-9]*[a-z0-9])?$`
+	// +kubebuilder:validation:XValidation:rule="self == oldSelf", message="prefix is immutable"
+	Prefix string `json:"prefix"`
+
+	// The ARN of the policy that is used to set the permissions boundary for the account roles.
+	// +optional
+	PermissionsBoundaryARN string `json:"permissionsBoundaryARN,omitempty"`
+
+	// The arn path for the account/operator roles as well as their policies.
+	// +optional
+	Path string `json:"path,omitempty"`
+
+	// Version of OpenShift that will be used for the roles tag, in the format x.y.z, for example "4.19.0".
+	// Setting the role OpenShift version tag does not affect the associated ROSAControlplane version.
+	// +kubebuilder:validation:Required
+	// +kubebuilder:validation:XValidation:rule="self == oldSelf", message="version is immutable"
+	Version string `json:"version"`
+
+	// SharedVPCConfig is used to set up shared VPC.
+	// +optional
+	SharedVPCConfig SharedVPCConfig `json:"sharedVPCConfig,omitempty"`
+}
+
+// OperatorRoleConfig defines cluster-specific operator IAM roles based on your cluster configuration.
+type OperatorRoleConfig struct {
+	// User-defined prefix for generated AWS operator roles.
+	// +kubebuilder:validation:Required
+	// +kubebuilder:validation:MaxLength:=4
+	// +kubebuilder:validation:Pattern:=`^[a-z]([-a-z0-9]*[a-z0-9])?$`
+	// +kubebuilder:validation:XValidation:rule="self == oldSelf", message="prefix is immutable"
+	Prefix string `json:"prefix"`
+
+	// The ARN of the policy that is used to set the permissions boundary for the operator roles.
+	// +optional
+	PermissionsBoundaryARN string `json:"permissionsBoundaryARN,omitempty"`
+
+	// SharedVPCConfig is used to set up shared VPC.
+	// +optional
+	SharedVPCConfig SharedVPCConfig `json:"sharedVPCConfig,omitempty"`
+
+	// OIDCID is the ID of the OIDC config that will be used to create the operator roles.
+	// Cannot be set when OidcProviderType is set to Managed.
+	// +optional
+	// +kubebuilder:validation:XValidation:rule="self == oldSelf", message="oidcID is immutable"
+	OIDCID string `json:"oidcID,omitempty"`
+}
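The enum and the OIDCID field come as a pair: Managed means OCM owns the OIDC config, Unmanaged means you bring your own and must name it. A hedged literal of the Unmanaged shape, with local mirror types trimmed to the interacting fields (the OIDC config ID is illustrative):

```go
package main

import "fmt"

// Local mirrors of the types above.
type OidcProviderType string

const (
	Managed   OidcProviderType = "Managed"
	Unmanaged OidcProviderType = "Unmanaged"
)

type OperatorRoleConfig struct {
	Prefix string
	OIDCID string
}

type ROSARoleConfigSpec struct {
	OidcProviderType   OidcProviderType
	OperatorRoleConfig OperatorRoleConfig
}

func main() {
	// Unmanaged requires a pre-created OIDC config ID; the webhook below
	// rejects an Unmanaged spec with an empty OIDCID, and a Managed spec
	// with a non-empty one.
	spec := ROSARoleConfigSpec{
		OidcProviderType:   Unmanaged,
		OperatorRoleConfig: OperatorRoleConfig{Prefix: "rosa", OIDCID: "example-oidc-id"},
	}
	fmt.Println(spec.OidcProviderType, spec.OperatorRoleConfig.OIDCID)
}
```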
+// SharedVPCConfig is used to set up shared VPC.
+type SharedVPCConfig struct {
+	// Role ARN associated with the private hosted zone used for Hosted Control Plane cluster shared VPC; this role contains policies to be used with Route 53
+	RouteRoleARN string `json:"routeRoleARN,omitempty"`
+
+	// Role ARN associated with the shared VPC used for Hosted Control Plane clusters; this role contains policies to be used with the VPC endpoint
+	VPCEndpointRoleARN string `json:"vpcEndpointRoleArn,omitempty"`
+}
+
+// ROSARoleConfigStatus defines the observed state of ROSARoleConfig
+type ROSARoleConfigStatus struct {
+	// ID of created OIDC config
+	OIDCID string `json:"oidcID,omitempty"`
+
+	// Created OIDC provider for operators to authenticate against in an STS cluster.
+	OIDCProviderARN string `json:"oidcProviderARN,omitempty"`
+
+	// Created account roles that can be used by the ROSA cluster.
+	AccountRolesRef AccountRolesRef `json:"accountRolesRef,omitempty"`
+
+	// AWS IAM roles used to perform credential requests by the openshift operators.
+	OperatorRolesRef rosacontrolplanev1.AWSRolesRef `json:"operatorRolesRef,omitempty"`
+
+	// Conditions specifies the ROSARoleConfig conditions
+	Conditions clusterv1beta1.Conditions `json:"conditions,omitempty"`
+}
+
+// AccountRolesRef describes ARNs used as Account roles.
+type AccountRolesRef struct {
+	// InstallerRoleARN is an AWS IAM role that OpenShift Cluster Manager will assume to create the cluster.
+	InstallerRoleARN string `json:"installerRoleARN,omitempty"`
+
+	// SupportRoleARN is an AWS IAM role used by Red Hat SREs to enable
+	// access to the cluster account in order to provide support.
+	SupportRoleARN string `json:"supportRoleARN,omitempty"`
+
+	// WorkerRoleARN is an AWS IAM role that will be attached to worker instances.
+	WorkerRoleARN string `json:"workerRoleARN,omitempty"`
+}
+
+// ROSARoleConfig is the Schema for the rosaroleconfigs API
+// +kubebuilder:object:root=true
+// +kubebuilder:resource:path=rosaroleconfigs,scope=Namespaced,categories=cluster-api,shortName=rosarole
+// +kubebuilder:storageversion
+// +kubebuilder:subresource:status
+type ROSARoleConfig struct {
+	metav1.TypeMeta   `json:",inline"`
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+
+	Spec   ROSARoleConfigSpec   `json:"spec,omitempty"`
+	Status ROSARoleConfigStatus `json:"status,omitempty"`
+}
+
+// ROSARoleConfigList contains a list of ROSARoleConfig
+// +kubebuilder:object:root=true
+type ROSARoleConfigList struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ListMeta `json:"metadata,omitempty"`
+	Items           []ROSARoleConfig `json:"items"`
+}
+
+// SetConditions sets the conditions of the ROSARoleConfig.
+func (r *ROSARoleConfig) SetConditions(conditions clusterv1beta1.Conditions) {
+	r.Status.Conditions = conditions
+}
+
+// GetConditions returns the observations of the operational state of the ROSARoleConfig resource.
+func (r *ROSARoleConfig) GetConditions() clusterv1beta1.Conditions {
+	return r.Status.Conditions
+}
+// IsSharedVPC checks if the shared VPC config is set.
+func (s SharedVPCConfig) IsSharedVPC() bool {
+	return s.VPCEndpointRoleARN != "" || s.RouteRoleARN != ""
+}
+
+func init() {
+	SchemeBuilder.Register(&ROSARoleConfig{}, &ROSARoleConfigList{})
+}
diff --git a/exp/api/v1beta2/rosaroleconfig_webhook.go b/exp/api/v1beta2/rosaroleconfig_webhook.go
new file mode 100644
index 0000000000..5e9d36a30b
--- /dev/null
+++ b/exp/api/v1beta2/rosaroleconfig_webhook.go
@@ -0,0 +1,79 @@
+package v1beta2
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/blang/semver"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
+	runtime "k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/util/validation/field"
+	ctrl "sigs.k8s.io/controller-runtime"
+	"sigs.k8s.io/controller-runtime/pkg/webhook"
+	"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
+)
+
+// SetupWebhookWithManager will setup the webhooks for the ROSARoleConfig.
+func (r *ROSARoleConfig) SetupWebhookWithManager(mgr ctrl.Manager) error {
+	w := new(rosaRoleConfigWebhook)
+	return ctrl.NewWebhookManagedBy(mgr).
+		For(r).
+		WithValidator(w).
+		WithDefaulter(w).
+		Complete()
+}
+
+// +kubebuilder:webhook:verbs=create;update,path=/validate-infrastructure-cluster-x-k8s-io-v1beta2-rosaroleconfig,mutating=false,failurePolicy=fail,matchPolicy=Equivalent,groups=infrastructure.cluster.x-k8s.io,resources=rosaroleconfigs,versions=v1beta2,name=validation.rosaroleconfig.infrastructure.cluster.x-k8s.io,sideEffects=None,admissionReviewVersions=v1;v1beta1
+// +kubebuilder:webhook:verbs=create;update,path=/mutate-infrastructure-cluster-x-k8s-io-v1beta2-rosaroleconfig,mutating=true,failurePolicy=fail,matchPolicy=Equivalent,groups=infrastructure.cluster.x-k8s.io,resources=rosaroleconfigs,versions=v1beta2,name=default.rosaroleconfig.infrastructure.cluster.x-k8s.io,sideEffects=None,admissionReviewVersions=v1;v1beta1
+
+type rosaRoleConfigWebhook struct{}
+
+var _ webhook.CustomDefaulter = &rosaRoleConfigWebhook{}
+var _ webhook.CustomValidator = &rosaRoleConfigWebhook{}
+
+// ValidateCreate implements admission.Validator.
+func (r *rosaRoleConfigWebhook) ValidateCreate(ctx context.Context, obj runtime.Object) (warnings admission.Warnings, err error) {
+	roleConfig, ok := obj.(*ROSARoleConfig)
+	if !ok {
+		return nil, fmt.Errorf("expected a ROSARoleConfig object but got %T", roleConfig)
+	}
+
+	var allErrs field.ErrorList
+	if roleConfig.Spec.OidcProviderType == Managed && roleConfig.Spec.OperatorRoleConfig.OIDCID != "" {
+		err := field.Invalid(field.NewPath("spec.operatorRoleConfig.oidcId"), roleConfig.Spec.OperatorRoleConfig.OIDCID, "cannot be set with Managed OIDC provider type")
+		allErrs = append(allErrs, err)
+	} else if roleConfig.Spec.OidcProviderType == Unmanaged && roleConfig.Spec.OperatorRoleConfig.OIDCID == "" {
+		err := field.Invalid(field.NewPath("spec.operatorRoleConfig.oidcId"), roleConfig.Spec.OperatorRoleConfig.OIDCID, "must set operatorRoleConfig.oidcId with Unmanaged OIDC provider type")
+		allErrs = append(allErrs, err)
+	}
+
+	_, vErr := semver.Parse(roleConfig.Spec.AccountRoleConfig.Version)
+	if vErr != nil {
+		err := field.Invalid(field.NewPath("spec.accountRoleConfig.version"), roleConfig.Spec.AccountRoleConfig.Version, "must be a valid semantic version")
+		allErrs = append(allErrs, err)
+	}
+
+	if len(allErrs) > 0 {
+		return nil, apierrors.NewInvalid(
+			roleConfig.GroupVersionKind().GroupKind(),
+			roleConfig.Name,
+			allErrs)
+	}
+
+	return nil, nil
+}
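The version check leans on blang/semver, which is stricter than it may look: a two-component string such as "4.19" fails to parse, which is why the field's doc comment insists on the x.y.z format. A quick standalone check:

```go
package main

import (
	"fmt"

	"github.com/blang/semver"
)

func main() {
	for _, v := range []string{"4.19.0", "4.19"} {
		// semver.Parse requires all of Major.Minor.Patch.
		if _, err := semver.Parse(v); err != nil {
			fmt.Printf("%q rejected: %v\n", v, err)
			continue
		}
		fmt.Printf("%q accepted\n", v)
	}
}
```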
+// ValidateUpdate implements admission.Validator.
+func (r *rosaRoleConfigWebhook) ValidateUpdate(ctx context.Context, old runtime.Object, updated runtime.Object) (warnings admission.Warnings, err error) {
+	return nil, nil
+}
+
+// ValidateDelete implements admission.Validator.
+func (r *rosaRoleConfigWebhook) ValidateDelete(ctx context.Context, obj runtime.Object) (warnings admission.Warnings, err error) {
+	return nil, nil
+}
+
+// Default implements admission.Defaulter.
+func (r *rosaRoleConfigWebhook) Default(ctx context.Context, obj runtime.Object) error {
+	return nil
+}
diff --git a/exp/api/v1beta2/zz_generated.deepcopy.go b/exp/api/v1beta2/zz_generated.deepcopy.go
index 6885eb4c64..592ec7d7fa 100644
--- a/exp/api/v1beta2/zz_generated.deepcopy.go
+++ b/exp/api/v1beta2/zz_generated.deepcopy.go
@@ -21,11 +21,13 @@ limitations under the License.
 package v1beta2
 
 import (
+	corev1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/util/intstr"
 	apiv1beta2 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
-	"sigs.k8s.io/cluster-api/api/v1beta1"
+	rosaapiv1beta2 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/rosa/api/v1beta2"
+	"sigs.k8s.io/cluster-api/api/core/v1beta1"
 )
 
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@@ -567,6 +569,11 @@ func (in *AWSManagedMachinePoolSpec) DeepCopyInto(out *AWSManagedMachinePoolSpec
 			(*in)[i].DeepCopyInto(&(*out)[i])
 		}
 	}
+	if in.NodeRepairConfig != nil {
+		in, out := &in.NodeRepairConfig, &out.NodeRepairConfig
+		*out = new(NodeRepairConfig)
+		(*in).DeepCopyInto(*out)
+	}
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSManagedMachinePoolSpec.
@@ -621,6 +628,37 @@ func (in *AWSManagedMachinePoolStatus) DeepCopy() *AWSManagedMachinePoolStatus {
 	return out
 }
 
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AccountRoleConfig) DeepCopyInto(out *AccountRoleConfig) {
+	*out = *in
+	out.SharedVPCConfig = in.SharedVPCConfig
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccountRoleConfig.
+func (in *AccountRoleConfig) DeepCopy() *AccountRoleConfig {
+	if in == nil {
+		return nil
+	}
+	out := new(AccountRoleConfig)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AccountRolesRef) DeepCopyInto(out *AccountRolesRef) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccountRolesRef.
+func (in *AccountRolesRef) DeepCopy() *AccountRolesRef {
+	if in == nil {
+		return nil
+	}
+	out := new(AccountRolesRef)
+	in.DeepCopyInto(out)
+	return out
+}
+
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *AutoScalingGroup) DeepCopyInto(out *AutoScalingGroup) {
 	*out = *in
@@ -688,6 +726,21 @@ func (in *BlockDeviceMapping) DeepCopy() *BlockDeviceMapping {
 	return out
 }
 
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CFResource) DeepCopyInto(out *CFResource) {
+	*out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CFResource.
+func (in *CFResource) DeepCopy() *CFResource {
+	if in == nil {
+		return nil
+	}
+	out := new(CFResource)
+	in.DeepCopyInto(out)
+	return out
+}
+
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *EBS) DeepCopyInto(out *EBS) {
 	*out = *in
@@ -891,6 +944,42 @@ func (in *MixedInstancesPolicy) DeepCopy() *MixedInstancesPolicy {
 	return out
 }
 
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NodeRepairConfig) DeepCopyInto(out *NodeRepairConfig) {
+	*out = *in
+	if in.Enabled != nil {
+		in, out := &in.Enabled, &out.Enabled
+		*out = new(bool)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeRepairConfig.
+func (in *NodeRepairConfig) DeepCopy() *NodeRepairConfig {
+	if in == nil {
+		return nil
+	}
+	out := new(NodeRepairConfig)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OperatorRoleConfig) DeepCopyInto(out *OperatorRoleConfig) {
+	*out = *in
+	out.SharedVPCConfig = in.SharedVPCConfig
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperatorRoleConfig.
+func (in *OperatorRoleConfig) DeepCopy() *OperatorRoleConfig {
+	if in == nil {
+		return nil
+	}
+	out := new(OperatorRoleConfig)
+	in.DeepCopyInto(out)
+	return out
+}
+
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *Overrides) DeepCopyInto(out *Overrides) {
 	*out = *in
@@ -1129,6 +1218,254 @@ func (in *ROSAMachinePoolList) DeepCopyObject() runtime.Object {
 	return nil
 }
 
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ROSANetwork) DeepCopyInto(out *ROSANetwork) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	in.Status.DeepCopyInto(&out.Status)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ROSANetwork.
+func (in *ROSANetwork) DeepCopy() *ROSANetwork {
+	if in == nil {
+		return nil
+	}
+	out := new(ROSANetwork)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ROSANetwork) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ROSANetworkList) DeepCopyInto(out *ROSANetworkList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ListMeta.DeepCopyInto(&out.ListMeta)
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]ROSANetwork, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ROSANetworkList.
+func (in *ROSANetworkList) DeepCopy() *ROSANetworkList {
+	if in == nil {
+		return nil
+	}
+	out := new(ROSANetworkList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ROSANetworkList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ROSANetworkSpec) DeepCopyInto(out *ROSANetworkSpec) { + *out = *in + if in.AvailabilityZones != nil { + in, out := &in.AvailabilityZones, &out.AvailabilityZones + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.IdentityRef != nil { + in, out := &in.IdentityRef, &out.IdentityRef + *out = new(apiv1beta2.AWSIdentityReference) + **out = **in + } + if in.StackTags != nil { + in, out := &in.StackTags, &out.StackTags + *out = make(Tags, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ROSANetworkSpec. +func (in *ROSANetworkSpec) DeepCopy() *ROSANetworkSpec { + if in == nil { + return nil + } + out := new(ROSANetworkSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ROSANetworkStatus) DeepCopyInto(out *ROSANetworkStatus) { + *out = *in + if in.Subnets != nil { + in, out := &in.Subnets, &out.Subnets + *out = make([]ROSANetworkSubnet, len(*in)) + copy(*out, *in) + } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]CFResource, len(*in)) + copy(*out, *in) + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make(v1beta1.Conditions, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ROSANetworkStatus. +func (in *ROSANetworkStatus) DeepCopy() *ROSANetworkStatus { + if in == nil { + return nil + } + out := new(ROSANetworkStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ROSANetworkSubnet) DeepCopyInto(out *ROSANetworkSubnet) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ROSANetworkSubnet. +func (in *ROSANetworkSubnet) DeepCopy() *ROSANetworkSubnet { + if in == nil { + return nil + } + out := new(ROSANetworkSubnet) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ROSARoleConfig) DeepCopyInto(out *ROSARoleConfig) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ROSARoleConfig. +func (in *ROSARoleConfig) DeepCopy() *ROSARoleConfig { + if in == nil { + return nil + } + out := new(ROSARoleConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ROSARoleConfig) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ROSARoleConfigList) DeepCopyInto(out *ROSARoleConfigList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ROSARoleConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ROSARoleConfigList. +func (in *ROSARoleConfigList) DeepCopy() *ROSARoleConfigList { + if in == nil { + return nil + } + out := new(ROSARoleConfigList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ROSARoleConfigList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ROSARoleConfigSpec) DeepCopyInto(out *ROSARoleConfigSpec) { + *out = *in + out.AccountRoleConfig = in.AccountRoleConfig + out.OperatorRoleConfig = in.OperatorRoleConfig + if in.IdentityRef != nil { + in, out := &in.IdentityRef, &out.IdentityRef + *out = new(apiv1beta2.AWSIdentityReference) + **out = **in + } + if in.CredentialsSecretRef != nil { + in, out := &in.CredentialsSecretRef, &out.CredentialsSecretRef + *out = new(corev1.LocalObjectReference) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ROSARoleConfigSpec. +func (in *ROSARoleConfigSpec) DeepCopy() *ROSARoleConfigSpec { + if in == nil { + return nil + } + out := new(ROSARoleConfigSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ROSARoleConfigStatus) DeepCopyInto(out *ROSARoleConfigStatus) { + *out = *in + out.AccountRolesRef = in.AccountRolesRef + out.OperatorRolesRef = in.OperatorRolesRef + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make(v1beta1.Conditions, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ROSARoleConfigStatus. +func (in *ROSARoleConfigStatus) DeepCopy() *ROSARoleConfigStatus { + if in == nil { + return nil + } + out := new(ROSARoleConfigStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *RefreshPreferences) DeepCopyInto(out *RefreshPreferences) { *out = *in @@ -1189,21 +1526,6 @@ func (in *RollingUpdate) DeepCopy() *RollingUpdate { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *RosaMachinePoolAutoScaling) DeepCopyInto(out *RosaMachinePoolAutoScaling) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RosaMachinePoolAutoScaling. -func (in *RosaMachinePoolAutoScaling) DeepCopy() *RosaMachinePoolAutoScaling { - if in == nil { - return nil - } - out := new(RosaMachinePoolAutoScaling) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *RosaMachinePoolSpec) DeepCopyInto(out *RosaMachinePoolSpec) { *out = *in @@ -1228,7 +1550,7 @@ func (in *RosaMachinePoolSpec) DeepCopyInto(out *RosaMachinePoolSpec) { } if in.Autoscaling != nil { in, out := &in.Autoscaling, &out.Autoscaling - *out = new(RosaMachinePoolAutoScaling) + *out = new(rosaapiv1beta2.AutoScaling) **out = **in } if in.TuningConfigs != nil { @@ -1335,6 +1657,21 @@ func (in *RosaUpdateConfig) DeepCopy() *RosaUpdateConfig { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SharedVPCConfig) DeepCopyInto(out *SharedVPCConfig) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SharedVPCConfig. +func (in *SharedVPCConfig) DeepCopy() *SharedVPCConfig { + if in == nil { + return nil + } + out := new(SharedVPCConfig) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *SuspendProcessesTypes) DeepCopyInto(out *SuspendProcessesTypes) { *out = *in diff --git a/exp/controlleridentitycreator/suite_test.go b/exp/controlleridentitycreator/suite_test.go index 4cf1b0bb12..d10dac2ec2 100644 --- a/exp/controlleridentitycreator/suite_test.go +++ b/exp/controlleridentitycreator/suite_test.go @@ -30,7 +30,7 @@ import ( infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/test/helpers" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" ) // These tests use Ginkgo (BDD-style Go testing framework). 
Refer to diff --git a/exp/controllers/awsfargatepool_controller.go b/exp/controllers/awsfargatepool_controller.go index b4fbb0f99d..edda22d59c 100644 --- a/exp/controllers/awsfargatepool_controller.go +++ b/exp/controllers/awsfargatepool_controller.go @@ -35,9 +35,9 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/eks" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" "sigs.k8s.io/cluster-api/util" - "sigs.k8s.io/cluster-api/util/conditions" + v1beta1conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" "sigs.k8s.io/cluster-api/util/predicates" ) @@ -112,12 +112,12 @@ func (r *AWSFargateProfileReconciler) Reconcile(ctx context.Context, req ctrl.Re } defer func() { - applicableConditions := []clusterv1.ConditionType{ + applicableConditions := []clusterv1beta1.ConditionType{ expinfrav1.IAMFargateRolesReadyCondition, expinfrav1.EKSFargateProfileReadyCondition, } - conditions.SetSummary(fargateProfileScope.FargateProfile, conditions.WithConditions(applicableConditions...), conditions.WithStepCounter()) + v1beta1conditions.SetSummary(fargateProfileScope.FargateProfile, v1beta1conditions.WithConditions(applicableConditions...), v1beta1conditions.WithStepCounter()) if err := fargateProfileScope.Close(); err != nil && reterr == nil { reterr = err @@ -126,7 +126,7 @@ func (r *AWSFargateProfileReconciler) Reconcile(ctx context.Context, req ctrl.Re if !controlPlane.Status.Ready { log.Info("Control plane is not ready yet") - conditions.MarkFalse(fargateProfile, clusterv1.ReadyCondition, expinfrav1.WaitingForEKSControlPlaneReason, clusterv1.ConditionSeverityInfo, "") + v1beta1conditions.MarkFalse(fargateProfile, clusterv1beta1.ReadyCondition, expinfrav1.WaitingForEKSControlPlaneReason, clusterv1beta1.ConditionSeverityInfo, "") return ctrl.Result{}, nil } @@ -201,7 +201,7 @@ func managedControlPlaneToFargateProfileMapFunc(c client.Client, log logger.Wrap fargateProfileForClusterList := expinfrav1.AWSFargateProfileList{} if err := c.List( - ctx, &fargateProfileForClusterList, client.InNamespace(clusterKey.Namespace), client.MatchingLabels{clusterv1.ClusterNameLabel: clusterKey.Name}, + ctx, &fargateProfileForClusterList, client.InNamespace(clusterKey.Namespace), client.MatchingLabels{clusterv1beta1.ClusterNameLabel: clusterKey.Name}, ); err != nil { log.Error(err, "couldn't list fargate profiles for cluster") return nil diff --git a/exp/controllers/awsmachinepool_controller.go b/exp/controllers/awsmachinepool_controller.go index 0a9fa2b43c..d803db7eb8 100644 --- a/exp/controllers/awsmachinepool_controller.go +++ b/exp/controllers/awsmachinepool_controller.go @@ -22,6 +22,7 @@ import ( "fmt" "time" + autoscalingtypes "github.com/aws/aws-sdk-go-v2/service/autoscaling/types" "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" "github.com/pkg/errors" @@ -31,6 +32,7 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/client-go/tools/record" "k8s.io/klog/v2" + "k8s.io/utils/ptr" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller" @@ -52,11 +54,11 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/ec2" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/s3" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - 
expclusterv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/annotations" - "sigs.k8s.io/cluster-api/util/conditions" + v1beta1conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" "sigs.k8s.io/cluster-api/util/predicates" ) @@ -109,6 +111,7 @@ func (r *AWSMachinePoolReconciler) getObjectStoreService(scope scope.S3Scope) se } // +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=awsmachinepools,verbs=get;list;watch;update;patch;delete +// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=awsmachinepools/finalizers,verbs=delete;update // +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=awsmachinepools/status,verbs=get;update;patch // +kubebuilder:rbac:groups=cluster.x-k8s.io,resources=machinepools;machinepools/status,verbs=get;list;watch;patch // +kubebuilder:rbac:groups="",resources=events,verbs=get;list;watch;create;update;patch @@ -181,12 +184,12 @@ func (r *AWSMachinePoolReconciler) Reconcile(ctx context.Context, req ctrl.Reque // Always close the scope when exiting this function so we can persist any AWSMachine changes. defer func() { // set Ready condition before AWSMachinePool is patched - conditions.SetSummary(machinePoolScope.AWSMachinePool, - conditions.WithConditions( + v1beta1conditions.SetSummary(machinePoolScope.AWSMachinePool, + v1beta1conditions.WithConditions( expinfrav1.ASGReadyCondition, expinfrav1.LaunchTemplateReadyCondition, ), - conditions.WithStepCounterIfOnly( + v1beta1conditions.WithStepCounterIfOnly( expinfrav1.ASGReadyCondition, expinfrav1.LaunchTemplateReadyCondition, ), @@ -228,7 +231,7 @@ func (r *AWSMachinePoolReconciler) SetupWithManager(ctx context.Context, mgr ctr WithOptions(options). For(&expinfrav1.AWSMachinePool{}). Watches( - &expclusterv1.MachinePool{}, + &clusterv1.MachinePool{}, handler.EnqueueRequestsFromMapFunc(machinePoolToInfrastructureMapFunc(expinfrav1.GroupVersion.WithKind("AWSMachinePool"))), ). WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(mgr.GetScheme(), logger.FromContext(ctx).GetLogger(), r.WatchFilterValue)). 
@@ -277,16 +280,16 @@ func (r *AWSMachinePoolReconciler) reconcileNormal(ctx context.Context, machineP } } - if !machinePoolScope.Cluster.Status.InfrastructureReady { + if !ptr.Deref(machinePoolScope.Cluster.Status.Initialization.InfrastructureProvisioned, false) { machinePoolScope.Info("Cluster infrastructure is not ready yet") - conditions.MarkFalse(machinePoolScope.AWSMachinePool, expinfrav1.ASGReadyCondition, infrav1.WaitingForClusterInfrastructureReason, clusterv1.ConditionSeverityInfo, "") + v1beta1conditions.MarkFalse(machinePoolScope.AWSMachinePool, expinfrav1.ASGReadyCondition, infrav1.WaitingForClusterInfrastructureReason, clusterv1beta1.ConditionSeverityInfo, "") return ctrl.Result{}, nil } // Make sure bootstrap data is available and populated if machinePoolScope.MachinePool.Spec.Template.Spec.Bootstrap.DataSecretName == nil { machinePoolScope.Info("Bootstrap data secret reference is not yet available") - conditions.MarkFalse(machinePoolScope.AWSMachinePool, expinfrav1.ASGReadyCondition, infrav1.WaitingForBootstrapDataReason, clusterv1.ConditionSeverityInfo, "") + v1beta1conditions.MarkFalse(machinePoolScope.AWSMachinePool, expinfrav1.ASGReadyCondition, infrav1.WaitingForBootstrapDataReason, clusterv1beta1.ConditionSeverityInfo, "") return ctrl.Result{}, nil } @@ -298,21 +301,25 @@ func (r *AWSMachinePoolReconciler) reconcileNormal(ctx context.Context, machineP // Find existing ASG asg, err := r.findASG(machinePoolScope, asgsvc) if err != nil { - conditions.MarkUnknown(machinePoolScope.AWSMachinePool, expinfrav1.ASGReadyCondition, expinfrav1.ASGNotFoundReason, "%s", err.Error()) + v1beta1conditions.MarkUnknown(machinePoolScope.AWSMachinePool, expinfrav1.ASGReadyCondition, expinfrav1.ASGNotFoundReason, "%s", err.Error()) return ctrl.Result{}, err } - canUpdateLaunchTemplate := func() (bool, error) { + canStartInstanceRefresh := func() (bool, *autoscalingtypes.InstanceRefreshStatus, error) { // If there is a change: before changing the template, check if there exists an ongoing instance refresh, // because only 1 instance refresh can be "InProgress". If the template is updated when a refresh cannot be started, // that change will not trigger a refresh. Do not start an instance refresh if only userdata changed. if asg == nil { // If the ASG hasn't been created yet, there is no need to check if we can start the instance refresh. // But we want to update the LaunchTemplate because an error in the LaunchTemplate may be blocking the ASG creation.
- return true, nil + return true, nil, nil } return asgsvc.CanStartASGInstanceRefresh(machinePoolScope) } + cancelInstanceRefresh := func() error { + machinePoolScope.Info("cancelling instance refresh") + return asgsvc.CancelASGInstanceRefresh(machinePoolScope) + } runPostLaunchTemplateUpdateOperation := func() error { // skip instance refresh if ASG is not created yet if asg == nil { @@ -336,19 +343,23 @@ func (r *AWSMachinePoolReconciler) reconcileNormal(ctx context.Context, machineP machinePoolScope.Info("starting instance refresh", "number of instances", machinePoolScope.MachinePool.Spec.Replicas) return asgsvc.StartASGInstanceRefresh(machinePoolScope) } - if err := reconSvc.ReconcileLaunchTemplate(ctx, machinePoolScope, machinePoolScope, s3Scope, ec2Svc, objectStoreSvc, canUpdateLaunchTemplate, runPostLaunchTemplateUpdateOperation); err != nil { + res, err := reconSvc.ReconcileLaunchTemplate(ctx, machinePoolScope, machinePoolScope, s3Scope, ec2Svc, objectStoreSvc, canStartInstanceRefresh, cancelInstanceRefresh, runPostLaunchTemplateUpdateOperation) + if err != nil { r.Recorder.Eventf(machinePoolScope.AWSMachinePool, corev1.EventTypeWarning, "FailedLaunchTemplateReconcile", "Failed to reconcile launch template: %v", err) machinePoolScope.Error(err, "failed to reconcile launch template") return ctrl.Result{}, err } + if res != nil { + return *res, nil + } // set the LaunchTemplateReady condition - conditions.MarkTrue(machinePoolScope.AWSMachinePool, expinfrav1.LaunchTemplateReadyCondition) + v1beta1conditions.MarkTrue(machinePoolScope.AWSMachinePool, expinfrav1.LaunchTemplateReadyCondition) if asg == nil { // Create new ASG if err := r.createPool(machinePoolScope, clusterScope); err != nil { - conditions.MarkFalse(machinePoolScope.AWSMachinePool, expinfrav1.ASGReadyCondition, expinfrav1.ASGProvisionFailedReason, clusterv1.ConditionSeverityError, "%s", err.Error()) + v1beta1conditions.MarkFalse(machinePoolScope.AWSMachinePool, expinfrav1.ASGReadyCondition, expinfrav1.ASGProvisionFailedReason, clusterv1beta1.ConditionSeverityError, "%s", err.Error()) return ctrl.Result{}, err } return ctrl.Result{ @@ -364,13 +375,13 @@ func (r *AWSMachinePoolReconciler) reconcileNormal(ctx context.Context, machineP if err := createAWSMachinesIfNotExists(ctx, awsMachineList, machinePoolScope.MachinePool, &machinePoolScope.AWSMachinePool.ObjectMeta, &machinePoolScope.AWSMachinePool.TypeMeta, asg, machinePoolScope.GetLogger(), r.Client, ec2Svc); err != nil { machinePoolScope.SetNotReady() - conditions.MarkFalse(machinePoolScope.AWSMachinePool, clusterv1.ReadyCondition, expinfrav1.AWSMachineCreationFailed, clusterv1.ConditionSeverityWarning, "%s", err.Error()) + v1beta1conditions.MarkFalse(machinePoolScope.AWSMachinePool, clusterv1beta1.ReadyCondition, expinfrav1.AWSMachineCreationFailed, clusterv1beta1.ConditionSeverityWarning, "%s", err.Error()) return ctrl.Result{}, fmt.Errorf("failed to create awsmachines: %w", err) } if err := deleteOrphanedAWSMachines(ctx, awsMachineList, asg, machinePoolScope.GetLogger(), r.Client); err != nil { machinePoolScope.SetNotReady() - conditions.MarkFalse(machinePoolScope.AWSMachinePool, clusterv1.ReadyCondition, expinfrav1.AWSMachineDeletionFailed, clusterv1.ConditionSeverityWarning, "%s", err.Error()) + v1beta1conditions.MarkFalse(machinePoolScope.AWSMachinePool, clusterv1beta1.ReadyCondition, expinfrav1.AWSMachineDeletionFailed, clusterv1beta1.ConditionSeverityWarning, "%s", err.Error()) return ctrl.Result{}, fmt.Errorf("failed to clean up awsmachines: %w", err) } } @@ 
-428,7 +439,7 @@ func (r *AWSMachinePoolReconciler) reconcileNormal(ctx context.Context, machineP machinePoolScope.AWSMachinePool.Spec.ProviderIDList = providerIDList machinePoolScope.AWSMachinePool.Status.Replicas = int32(len(providerIDList)) //#nosec G115 machinePoolScope.AWSMachinePool.Status.Ready = true - conditions.MarkTrue(machinePoolScope.AWSMachinePool, expinfrav1.ASGReadyCondition) + v1beta1conditions.MarkTrue(machinePoolScope.AWSMachinePool, expinfrav1.ASGReadyCondition) err = machinePoolScope.UpdateInstanceStatuses(ctx, asg.Instances) if err != nil { @@ -474,7 +485,7 @@ func (r *AWSMachinePoolReconciler) reconcileDelete(ctx context.Context, machineP case expinfrav1.ASGStatusDeleteInProgress: // ASG is already deleting machinePoolScope.SetNotReady() - conditions.MarkFalse(machinePoolScope.AWSMachinePool, expinfrav1.ASGReadyCondition, expinfrav1.ASGDeletionInProgress, clusterv1.ConditionSeverityWarning, "") + v1beta1conditions.MarkFalse(machinePoolScope.AWSMachinePool, expinfrav1.ASGReadyCondition, expinfrav1.ASGDeletionInProgress, clusterv1beta1.ConditionSeverityWarning, "") r.Recorder.Eventf(machinePoolScope.AWSMachinePool, corev1.EventTypeWarning, "DeletionInProgress", "ASG deletion in progress: %q", asg.Name) machinePoolScope.Info("ASG is already deleting", "name", asg.Name) default: @@ -654,7 +665,7 @@ func diffASG(machinePoolScope *scope.MachinePoolScope, existingASG *expinfrav1.A } // getOwnerMachinePool returns the MachinePool object owning the current resource. -func getOwnerMachinePool(ctx context.Context, c client.Client, obj metav1.ObjectMeta) (*expclusterv1.MachinePool, error) { +func getOwnerMachinePool(ctx context.Context, c client.Client, obj metav1.ObjectMeta) (*clusterv1.MachinePool, error) { for _, ref := range obj.OwnerReferences { if ref.Kind != "MachinePool" { continue @@ -663,7 +674,7 @@ func getOwnerMachinePool(ctx context.Context, c client.Client, obj metav1.Object if err != nil { return nil, errors.WithStack(err) } - if gv.Group == expclusterv1.GroupVersion.Group { + if gv.Group == clusterv1.GroupVersion.Group { return getMachinePoolByName(ctx, c, obj.Namespace, ref.Name) } } @@ -671,8 +682,8 @@ func getOwnerMachinePool(ctx context.Context, c client.Client, obj metav1.Object } // getMachinePoolByName finds and returns a MachinePool object using the specified params.
-func getMachinePoolByName(ctx context.Context, c client.Client, namespace, name string) (*expclusterv1.MachinePool, error) { - m := &expclusterv1.MachinePool{} +func getMachinePoolByName(ctx context.Context, c client.Client, namespace, name string) (*clusterv1.MachinePool, error) { + m := &clusterv1.MachinePool{} key := client.ObjectKey{Name: name, Namespace: namespace} if err := c.Get(ctx, key, m); err != nil { return nil, err @@ -682,14 +693,14 @@ func getMachinePoolByName(ctx context.Context, c client.Client, namespace, name func machinePoolToInfrastructureMapFunc(gvk schema.GroupVersionKind) handler.MapFunc { return func(ctx context.Context, o client.Object) []reconcile.Request { - m, ok := o.(*expclusterv1.MachinePool) + m, ok := o.(*clusterv1.MachinePool) if !ok { klog.Errorf("Expected a MachinePool but got a %T", o) } gk := gvk.GroupKind() // Return early if the GroupKind doesn't match what we expect - infraGK := m.Spec.Template.Spec.InfrastructureRef.GroupVersionKind().GroupKind() + infraGK := m.Spec.Template.Spec.InfrastructureRef.GroupKind() if gk != infraGK { return nil } @@ -717,7 +728,7 @@ func (r *AWSMachinePoolReconciler) getInfraCluster(ctx context.Context, log *log var managedControlPlaneScope *scope.ManagedControlPlaneScope var err error - if cluster.Spec.ControlPlaneRef != nil && cluster.Spec.ControlPlaneRef.Kind == controllers.AWSManagedControlPlaneRefKind { + if cluster.Spec.ControlPlaneRef.IsDefined() && cluster.Spec.ControlPlaneRef.Kind == controllers.AWSManagedControlPlaneRefKind { controlPlane := &ekscontrolplanev1.AWSManagedControlPlane{} controlPlaneName := client.ObjectKey{ Namespace: awsMachinePool.Namespace, diff --git a/exp/controllers/awsmachinepool_controller_test.go b/exp/controllers/awsmachinepool_controller_test.go index 59675a287a..82ba81e8a9 100644 --- a/exp/controllers/awsmachinepool_controller_test.go +++ b/exp/controllers/awsmachinepool_controller_test.go @@ -25,6 +25,7 @@ import ( "testing" "github.com/aws/aws-sdk-go-v2/aws" + autoscalingtypes "github.com/aws/aws-sdk-go-v2/service/autoscaling/types" ec2types "github.com/aws/aws-sdk-go-v2/service/ec2/types" "github.com/aws/aws-sdk-go-v2/service/s3" "github.com/go-logr/logr" @@ -54,9 +55,9 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/sts/mock_stsiface" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/userdata" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - expclusterv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" - "sigs.k8s.io/cluster-api/util/conditions" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" + v1beta1conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" "sigs.k8s.io/cluster-api/util/labels/format" "sigs.k8s.io/cluster-api/util/patch" ) @@ -146,10 +147,12 @@ func TestAWSMachinePoolReconciler(t *testing.T) { Client: testEnv.Client, Cluster: &clusterv1.Cluster{ Status: clusterv1.ClusterStatus{ - InfrastructureReady: true, + Initialization: clusterv1.ClusterInitializationStatus{ + InfrastructureProvisioned: ptr.To(true), + }, }, }, - MachinePool: &expclusterv1.MachinePool{ + MachinePool: &clusterv1.MachinePool{ ObjectMeta: metav1.ObjectMeta{ Name: "mp", Namespace: "default", @@ -159,11 +162,16 @@ func TestAWSMachinePoolReconciler(t *testing.T) { APIVersion: "cluster.x-k8s.io/v1beta1", Kind: "MachinePool", }, - Spec: expclusterv1.MachinePoolSpec{ + Spec: clusterv1.MachinePoolSpec{ ClusterName: "test", Template: 
clusterv1.MachineTemplateSpec{ Spec: clusterv1.MachineSpec{ ClusterName: "test", + InfrastructureRef: clusterv1.ContractVersionedObjectReference{ + Name: "rosa-mp", + Kind: "ROSAMachinePool", + APIGroup: clusterv1.GroupVersion.Group, + }, Bootstrap: clusterv1.Bootstrap{ DataSecretName: ptr.To[string]("bootstrap-data"), }, @@ -262,7 +270,7 @@ func TestAWSMachinePoolReconciler(t *testing.T) { defer teardown(t, g) getASG(t, g) - ms.Cluster.Status.InfrastructureReady = false + ms.Cluster.Status.Initialization.InfrastructureProvisioned = ptr.To(false) buf := new(bytes.Buffer) klog.SetOutput(buf) @@ -270,7 +278,7 @@ func TestAWSMachinePoolReconciler(t *testing.T) { _, err := reconciler.reconcileNormal(context.Background(), ms, cs, cs, cs) g.Expect(err).To(BeNil()) g.Expect(buf.String()).To(ContainSubstring("Cluster infrastructure is not ready yet")) - expectConditions(g, ms.AWSMachinePool, []conditionAssertion{{expinfrav1.ASGReadyCondition, corev1.ConditionFalse, clusterv1.ConditionSeverityInfo, infrav1.WaitingForClusterInfrastructureReason}}) + expectConditions(g, ms.AWSMachinePool, []conditionAssertion{{expinfrav1.ASGReadyCondition, corev1.ConditionFalse, clusterv1beta1.ConditionSeverityInfo, infrav1.WaitingForClusterInfrastructureReason}}) }) t.Run("should exit immediately if bootstrap data secret reference isn't available", func(t *testing.T) { g := NewWithT(t) @@ -286,7 +294,7 @@ func TestAWSMachinePoolReconciler(t *testing.T) { g.Expect(err).To(BeNil()) g.Expect(buf.String()).To(ContainSubstring("Bootstrap data secret reference is not yet available")) - expectConditions(g, ms.AWSMachinePool, []conditionAssertion{{expinfrav1.ASGReadyCondition, corev1.ConditionFalse, clusterv1.ConditionSeverityInfo, infrav1.WaitingForBootstrapDataReason}}) + expectConditions(g, ms.AWSMachinePool, []conditionAssertion{{expinfrav1.ASGReadyCondition, corev1.ConditionFalse, clusterv1beta1.ConditionSeverityInfo, infrav1.WaitingForBootstrapDataReason}}) }) }) t.Run("there's a provider ID", func(t *testing.T) { @@ -309,7 +317,7 @@ func TestAWSMachinePoolReconciler(t *testing.T) { getASG(t, g) expectedErr := errors.New("no connection available ") - reconSvc.EXPECT().ReconcileLaunchTemplate(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(expectedErr) + reconSvc.EXPECT().ReconcileLaunchTemplate(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, expectedErr) _, err := reconciler.reconcileNormal(context.Background(), ms, cs, cs, cs) g.Expect(errors.Cause(err)).To(MatchError(expectedErr)) }) @@ -335,7 +343,7 @@ func TestAWSMachinePoolReconciler(t *testing.T) { Subnets: []string{}, } - reconSvc.EXPECT().ReconcileLaunchTemplate(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil) + reconSvc.EXPECT().ReconcileLaunchTemplate(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, nil) asgSvc.EXPECT().GetASGByName(gomock.Any()).Return(asg, nil) asgSvc.EXPECT().DescribeLifecycleHooks(gomock.Any()).Return(nil, nil) asgSvc.EXPECT().SubnetIDs(gomock.Any()).Return([]string{}, nil) @@ -373,7 +381,7 @@ func TestAWSMachinePoolReconciler(t *testing.T) { Subnets: []string{}, } - reconSvc.EXPECT().ReconcileLaunchTemplate(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), 
gomock.Any()).Return(nil) + reconSvc.EXPECT().ReconcileLaunchTemplate(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, nil) asgSvc.EXPECT().GetASGByName(gomock.Any()).Return(asg, nil) ec2Svc.EXPECT().InstanceIfExists(aws.String("1")).Return(&infrav1.Instance{ID: "1", Type: "m6.2xlarge"}, nil) ec2Svc.EXPECT().InstanceIfExists(aws.String("2")).Return(&infrav1.Instance{ID: "2", Type: "m6.2xlarge"}, nil) @@ -417,6 +425,18 @@ func TestAWSMachinePoolReconciler(t *testing.T) { }, Spec: clusterv1.MachineSpec{ ClusterName: "test", + InfrastructureRef: clusterv1.ContractVersionedObjectReference{ + Name: "name-1", + Kind: "ROSAMachine", + APIGroup: clusterv1.GroupVersion.Group, + }, + Bootstrap: clusterv1.Bootstrap{ + ConfigRef: clusterv1.ContractVersionedObjectReference{ + Name: "name-1-config", + Kind: "EKSConfig", + APIGroup: clusterv1.GroupVersion.Group, + }, + }, }, })).To(Succeed()) g.Expect(testEnv.Create(context.Background(), &infrav1.AWSMachine{ @@ -449,6 +469,18 @@ func TestAWSMachinePoolReconciler(t *testing.T) { }, Spec: clusterv1.MachineSpec{ ClusterName: "test", + InfrastructureRef: clusterv1.ContractVersionedObjectReference{ + Name: "name-2", + Kind: "ROSAMachinePool", + APIGroup: clusterv1.GroupVersion.Group, + }, + Bootstrap: clusterv1.Bootstrap{ + ConfigRef: clusterv1.ContractVersionedObjectReference{ + Name: "name-2-config", + Kind: "EKSConfig", + APIGroup: clusterv1.GroupVersion.Group, + }, + }, }, })).To(Succeed()) g.Expect(testEnv.Create(context.Background(), &infrav1.AWSMachine{ @@ -474,7 +506,7 @@ func TestAWSMachinePoolReconciler(t *testing.T) { }, })).To(Succeed()) - reconSvc.EXPECT().ReconcileLaunchTemplate(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil) + reconSvc.EXPECT().ReconcileLaunchTemplate(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, nil) asgSvc.EXPECT().GetASGByName(gomock.Any()).Return(asg, nil) asgSvc.EXPECT().DescribeLifecycleHooks(gomock.Any()).Return(nil, nil) asgSvc.EXPECT().SubnetIDs(gomock.Any()).Return([]string{}, nil) @@ -509,7 +541,7 @@ func TestAWSMachinePoolReconciler(t *testing.T) { defer teardown(t, g) setSuspendedProcesses(t, g) - reconSvc.EXPECT().ReconcileLaunchTemplate(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil) + reconSvc.EXPECT().ReconcileLaunchTemplate(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, nil) asgSvc.EXPECT().GetASGByName(gomock.Any()).Return(nil, nil) asgSvc.EXPECT().CreateASG(gomock.Any()).Return(&expinfrav1.AutoScalingGroup{ Name: "name", @@ -533,7 +565,7 @@ func TestAWSMachinePoolReconciler(t *testing.T) { defer teardown(t, g) setSuspendedProcesses(t, g) ms.AWSMachinePool.Spec.SuspendProcesses.All = true - reconSvc.EXPECT().ReconcileLaunchTemplate(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil) + reconSvc.EXPECT().ReconcileLaunchTemplate(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, nil) asgSvc.EXPECT().DescribeLifecycleHooks(gomock.Any()).Return(nil, nil) reconSvc.EXPECT().ReconcileTags(gomock.Any(), gomock.Any()).Return(nil) 
asgSvc.EXPECT().GetASGByName(gomock.Any()).Return(&expinfrav1.AutoScalingGroup{ @@ -574,7 +606,7 @@ func TestAWSMachinePoolReconciler(t *testing.T) { defer teardown(t, g) setSuspendedProcesses(t, g) - reconSvc.EXPECT().ReconcileLaunchTemplate(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil) + reconSvc.EXPECT().ReconcileLaunchTemplate(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, nil) asgSvc.EXPECT().DescribeLifecycleHooks(gomock.Any()).Return(nil, nil) reconSvc.EXPECT().ReconcileTags(gomock.Any(), gomock.Any()).Return(nil) asgSvc.EXPECT().GetASGByName(gomock.Any()).Return(&expinfrav1.AutoScalingGroup{ @@ -600,7 +632,7 @@ func TestAWSMachinePoolReconciler(t *testing.T) { Name: "an-asg", DesiredCapacity: ptr.To[int32](1), } - reconSvc.EXPECT().ReconcileLaunchTemplate(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil) + reconSvc.EXPECT().ReconcileLaunchTemplate(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, nil) asgSvc.EXPECT().DescribeLifecycleHooks(gomock.Any()).Return(nil, nil) asgSvc.EXPECT().GetASGByName(gomock.Any()).Return(&asg, nil) asgSvc.EXPECT().SubnetIDs(gomock.Any()).Return([]string{}, nil) @@ -641,7 +673,7 @@ func TestAWSMachinePoolReconciler(t *testing.T) { }, Subnets: []string{"subnet1", "subnet2"}, } - reconSvc.EXPECT().ReconcileLaunchTemplate(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil) + reconSvc.EXPECT().ReconcileLaunchTemplate(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, nil) asgSvc.EXPECT().DescribeLifecycleHooks(gomock.Any()).Return(nil, nil) reconSvc.EXPECT().ReconcileTags(gomock.Any(), gomock.Any()).Return(nil) asgSvc.EXPECT().GetASGByName(gomock.Any()).Return(&asg, nil).AnyTimes() @@ -661,7 +693,7 @@ func TestAWSMachinePoolReconciler(t *testing.T) { MaxSize: int32(100), Subnets: []string{"subnet1", "subnet2"}, } - reconSvc.EXPECT().ReconcileLaunchTemplate(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil) + reconSvc.EXPECT().ReconcileLaunchTemplate(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, nil) asgSvc.EXPECT().DescribeLifecycleHooks(gomock.Any()).Return(nil, nil) reconSvc.EXPECT().ReconcileTags(gomock.Any(), gomock.Any()).Return(nil) asgSvc.EXPECT().GetASGByName(gomock.Any()).Return(&asg, nil).AnyTimes() @@ -681,7 +713,7 @@ func TestAWSMachinePoolReconciler(t *testing.T) { MaxSize: int32(2), Subnets: []string{}, } - reconSvc.EXPECT().ReconcileLaunchTemplate(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil) + reconSvc.EXPECT().ReconcileLaunchTemplate(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, nil) asgSvc.EXPECT().DescribeLifecycleHooks(gomock.Any()).Return(nil, nil) reconSvc.EXPECT().ReconcileTags(gomock.Any(), gomock.Any()).Return(nil) asgSvc.EXPECT().GetASGByName(gomock.Any()).Return(&asg, nil).AnyTimes() @@ -791,7 +823,7 @@ func 
TestAWSMachinePoolReconciler(t *testing.T) { nil) ec2Svc.EXPECT().DiscoverLaunchTemplateAMI(gomock.Any(), gomock.Any()).Return(ptr.To[string]("ami-different"), nil) ec2Svc.EXPECT().LaunchTemplateNeedsUpdate(gomock.Any(), gomock.Any(), gomock.Any()).Return(false, nil) - asgSvc.EXPECT().CanStartASGInstanceRefresh(gomock.Any()).Return(true, nil) + asgSvc.EXPECT().CanStartASGInstanceRefresh(gomock.Any()).Return(true, nil, nil) ec2Svc.EXPECT().PruneLaunchTemplateVersions(gomock.Any()).Return(nil, nil) ec2Svc.EXPECT().CreateLaunchTemplateVersion(gomock.Any(), gomock.Any(), gomock.Eq(ptr.To[string]("ami-different")), gomock.Eq(apimachinerytypes.NamespacedName{Namespace: "default", Name: "bootstrap-data"}), gomock.Any(), gomock.Any()).Return(nil) ec2Svc.EXPECT().GetLaunchTemplateLatestVersion(gomock.Any()).Return("2", nil) @@ -847,7 +879,7 @@ func TestAWSMachinePoolReconciler(t *testing.T) { nil) ec2Svc.EXPECT().DiscoverLaunchTemplateAMI(gomock.Any(), gomock.Any()).Return(ptr.To[string]("ami-existing"), nil) ec2Svc.EXPECT().LaunchTemplateNeedsUpdate(gomock.Any(), gomock.Any(), gomock.Any()).Return(false, nil) - asgSvc.EXPECT().CanStartASGInstanceRefresh(gomock.Any()).Return(true, nil) + asgSvc.EXPECT().CanStartASGInstanceRefresh(gomock.Any()).Return(true, nil, nil) ec2Svc.EXPECT().PruneLaunchTemplateVersions(gomock.Any()).Return(nil, nil) ec2Svc.EXPECT().CreateLaunchTemplateVersion(gomock.Any(), gomock.Any(), gomock.Eq(ptr.To[string]("ami-existing")), gomock.Eq(apimachinerytypes.NamespacedName{Namespace: "default", Name: "bootstrap-data"}), gomock.Any(), gomock.Any()).Return(nil) ec2Svc.EXPECT().GetLaunchTemplateLatestVersion(gomock.Any()).Return("2", nil) @@ -939,7 +971,7 @@ func TestAWSMachinePoolReconciler(t *testing.T) { nil) ec2Svc.EXPECT().DiscoverLaunchTemplateAMI(gomock.Any(), gomock.Any()).Return(ptr.To[string]("ami-existing"), nil) ec2Svc.EXPECT().LaunchTemplateNeedsUpdate(gomock.Any(), gomock.Any(), gomock.Any()).Return(false, nil) - asgSvc.EXPECT().CanStartASGInstanceRefresh(gomock.Any()).Return(true, nil) + asgSvc.EXPECT().CanStartASGInstanceRefresh(gomock.Any()).Return(true, nil, nil) ec2Svc.EXPECT().PruneLaunchTemplateVersions(gomock.Any()).Return(nil, nil) ec2Svc.EXPECT().CreateLaunchTemplateVersion(gomock.Any(), gomock.Any(), gomock.Eq(ptr.To[string]("ami-existing")), gomock.Eq(apimachinerytypes.NamespacedName{Namespace: "default", Name: "bootstrap-data-new"}), gomock.Any(), gomock.Any()).Return(nil) ec2Svc.EXPECT().GetLaunchTemplateLatestVersion(gomock.Any()).Return("2", nil) @@ -1031,15 +1063,19 @@ func TestAWSMachinePoolReconciler(t *testing.T) { return &s3.PutObjectOutput{}, nil }) - // Simulate a pending instance refresh - asgSvc.EXPECT().CanStartASGInstanceRefresh(gomock.Any()).Return(false, nil) + // Simulate a pending instance refresh here, to see if `CancelInstanceRefresh` gets called + instanceRefreshStatus := autoscalingtypes.InstanceRefreshStatusPending + asgSvc.EXPECT().CanStartASGInstanceRefresh(gomock.Any()).Return(false, &instanceRefreshStatus, nil) + asgSvc.EXPECT().CancelASGInstanceRefresh(gomock.Any()).Return(nil) - _, err := reconciler.reconcileNormal(context.Background(), ms, cs, cs, cs) - g.Expect(err).To(HaveOccurred()) - expectConditions(g, ms.AWSMachinePool, []conditionAssertion{{expinfrav1.PreLaunchTemplateUpdateCheckCondition, corev1.ConditionFalse, clusterv1.ConditionSeverityWarning, expinfrav1.PreLaunchTemplateUpdateCheckFailedReason}}) + // First reconciliation should notice the existing instance refresh and cancel it. 
+ // Since the cancellation is asynchronous, the controller should requeue. + res, err := reconciler.reconcileNormal(context.Background(), ms, cs, cs, cs) + g.Expect(res.RequeueAfter).To(BeNumerically(">", 0)) + g.Expect(err).To(Succeed()) - // Now simulate that no pending instance refresh exists - asgSvc.EXPECT().CanStartASGInstanceRefresh(gomock.Any()).Return(true, nil) + // Now simulate that no pending instance refresh exists. Cancellation should not be called anymore. + asgSvc.EXPECT().CanStartASGInstanceRefresh(gomock.Any()).Return(true, nil, nil) asgSvc.EXPECT().GetASGByName(gomock.Any()).DoAndReturn(func(scope *scope.MachinePoolScope) (*expinfrav1.AutoScalingGroup, error) { g.Expect(scope.Name()).To(Equal("test")) @@ -1077,6 +1113,9 @@ func TestAWSMachinePoolReconciler(t *testing.T) { return &s3.PutObjectOutput{}, nil }) + // No cancellation expected in this second reconciliation (see above) + asgSvc.EXPECT().CancelASGInstanceRefresh(gomock.Any()).Times(0) + var simulatedDeletedVersionNumber int64 = 777 bootstrapDataHash := "some-simulated-hash" ec2Svc.EXPECT().PruneLaunchTemplateVersions(gomock.Any()).Return(&ec2types.LaunchTemplateVersion{ @@ -1194,7 +1233,7 @@ func TestAWSMachinePoolReconciler(t *testing.T) { } ms.AWSMachinePool.Spec.AWSLifecycleHooks = append(ms.AWSMachinePool.Spec.AWSLifecycleHooks, newLifecycleHook) - reconSvc.EXPECT().ReconcileLaunchTemplate(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil) + reconSvc.EXPECT().ReconcileLaunchTemplate(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, nil) // New ASG must be created with lifecycle hooks (single AWS SDK call is enough) // @@ -1220,7 +1259,7 @@ func TestAWSMachinePoolReconciler(t *testing.T) { } ms.AWSMachinePool.Spec.AWSLifecycleHooks = append(ms.AWSMachinePool.Spec.AWSLifecycleHooks, newLifecycleHook) - reconSvc.EXPECT().ReconcileLaunchTemplate(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil) + reconSvc.EXPECT().ReconcileLaunchTemplate(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, nil) asgSvc.EXPECT().DescribeLifecycleHooks(gomock.Eq(ms.Name())).Return(nil, nil) asgSvc.EXPECT().CreateLifecycleHook(gomock.Any(), ms.Name(), &newLifecycleHook).Return(nil) reconSvc.EXPECT().ReconcileTags(gomock.Any(), gomock.Any()).Return(nil) @@ -1250,7 +1289,7 @@ func TestAWSMachinePoolReconciler(t *testing.T) { setup(t, g) defer teardown(t, g) - reconSvc.EXPECT().ReconcileLaunchTemplate(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil) + reconSvc.EXPECT().ReconcileLaunchTemplate(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, nil) asgSvc.EXPECT().DescribeLifecycleHooks(gomock.Eq(ms.Name())).Return([]*expinfrav1.AWSLifecycleHook{ { Name: "hook-to-remove", @@ -1293,7 +1332,7 @@ func TestAWSMachinePoolReconciler(t *testing.T) { } ms.AWSMachinePool.Spec.AWSLifecycleHooks = append(ms.AWSMachinePool.Spec.AWSLifecycleHooks, newLifecycleHook) - reconSvc.EXPECT().ReconcileLaunchTemplate(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil) + 
reconSvc.EXPECT().ReconcileLaunchTemplate(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, nil) asgSvc.EXPECT().DescribeLifecycleHooks(gomock.Eq(ms.Name())).Return([]*expinfrav1.AWSLifecycleHook{ { Name: "hook-to-remove", @@ -1337,7 +1376,7 @@ func TestAWSMachinePoolReconciler(t *testing.T) { } ms.AWSMachinePool.Spec.AWSLifecycleHooks = append(ms.AWSMachinePool.Spec.AWSLifecycleHooks, updateLifecycleHook) - reconSvc.EXPECT().ReconcileLaunchTemplate(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil) + reconSvc.EXPECT().ReconcileLaunchTemplate(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, nil) asgSvc.EXPECT().DescribeLifecycleHooks(gomock.Eq(ms.Name())).Return([]*expinfrav1.AWSLifecycleHook{ { Name: "hook-to-update", @@ -1371,16 +1410,16 @@ func TestAWSMachinePoolReconciler(t *testing.T) { } type conditionAssertion struct { - conditionType clusterv1.ConditionType + conditionType clusterv1beta1.ConditionType status corev1.ConditionStatus - severity clusterv1.ConditionSeverity + severity clusterv1beta1.ConditionSeverity reason string } func expectConditions(g *WithT, m *expinfrav1.AWSMachinePool, expected []conditionAssertion) { g.Expect(len(m.Status.Conditions)).To(BeNumerically(">=", len(expected)), "number of conditions") for _, c := range expected { - actual := conditions.Get(m, c.conditionType) + actual := v1beta1conditions.Get(m, c.conditionType) g.Expect(actual).To(Not(BeNil())) g.Expect(actual.Type).To(Equal(c.conditionType)) g.Expect(actual.Status).To(Equal(c.status)) @@ -1420,8 +1459,8 @@ func TestDiffASG(t *testing.T) { name: "replicas != asg.desiredCapacity", args: args{ machinePoolScope: &scope.MachinePoolScope{ - MachinePool: &expclusterv1.MachinePool{ - Spec: expclusterv1.MachinePoolSpec{ + MachinePool: &clusterv1.MachinePool{ + Spec: clusterv1.MachinePoolSpec{ Replicas: ptr.To[int32](0), }, }, @@ -1436,8 +1475,8 @@ func TestDiffASG(t *testing.T) { name: "replicas (nil) != asg.desiredCapacity", args: args{ machinePoolScope: &scope.MachinePoolScope{ - MachinePool: &expclusterv1.MachinePool{ - Spec: expclusterv1.MachinePoolSpec{ + MachinePool: &clusterv1.MachinePool{ + Spec: clusterv1.MachinePoolSpec{ Replicas: nil, }, }, @@ -1452,8 +1491,8 @@ func TestDiffASG(t *testing.T) { name: "replicas != asg.desiredCapacity (nil)", args: args{ machinePoolScope: &scope.MachinePoolScope{ - MachinePool: &expclusterv1.MachinePool{ - Spec: expclusterv1.MachinePoolSpec{ + MachinePool: &clusterv1.MachinePool{ + Spec: clusterv1.MachinePoolSpec{ Replicas: ptr.To[int32](0), }, }, @@ -1468,8 +1507,8 @@ func TestDiffASG(t *testing.T) { name: "maxSize != asg.maxSize", args: args{ machinePoolScope: &scope.MachinePoolScope{ - MachinePool: &expclusterv1.MachinePool{ - Spec: expclusterv1.MachinePoolSpec{ + MachinePool: &clusterv1.MachinePool{ + Spec: clusterv1.MachinePoolSpec{ Replicas: ptr.To[int32](1), }, }, @@ -1490,8 +1529,8 @@ func TestDiffASG(t *testing.T) { name: "minSize != asg.minSize", args: args{ machinePoolScope: &scope.MachinePoolScope{ - MachinePool: &expclusterv1.MachinePool{ - Spec: expclusterv1.MachinePoolSpec{ + MachinePool: &clusterv1.MachinePool{ + Spec: clusterv1.MachinePoolSpec{ Replicas: ptr.To[int32](1), }, }, @@ -1514,8 +1553,8 @@ func TestDiffASG(t *testing.T) { name: "capacityRebalance != asg.capacityRebalance", args: args{ 
machinePoolScope: &scope.MachinePoolScope{ - MachinePool: &expclusterv1.MachinePool{ - Spec: expclusterv1.MachinePoolSpec{ + MachinePool: &clusterv1.MachinePool{ + Spec: clusterv1.MachinePoolSpec{ Replicas: ptr.To[int32](1), }, }, @@ -1540,8 +1579,8 @@ func TestDiffASG(t *testing.T) { name: "MixedInstancesPolicy != asg.MixedInstancesPolicy", args: args{ machinePoolScope: &scope.MachinePoolScope{ - MachinePool: &expclusterv1.MachinePool{ - Spec: expclusterv1.MachinePoolSpec{ + MachinePool: &clusterv1.MachinePool{ + Spec: clusterv1.MachinePoolSpec{ Replicas: ptr.To[int32](1), }, }, @@ -1574,8 +1613,8 @@ func TestDiffASG(t *testing.T) { name: "MixedInstancesPolicy.InstancesDistribution != asg.MixedInstancesPolicy.InstancesDistribution", args: args{ machinePoolScope: &scope.MachinePoolScope{ - MachinePool: &expclusterv1.MachinePool{ - Spec: expclusterv1.MachinePoolSpec{ + MachinePool: &clusterv1.MachinePool{ + Spec: clusterv1.MachinePoolSpec{ Replicas: ptr.To[int32](1), }, }, @@ -1627,8 +1666,8 @@ func TestDiffASG(t *testing.T) { name: "MixedInstancesPolicy.InstancesDistribution unset", args: args{ machinePoolScope: &scope.MachinePoolScope{ - MachinePool: &expclusterv1.MachinePool{ - Spec: expclusterv1.MachinePoolSpec{ + MachinePool: &clusterv1.MachinePool{ + Spec: clusterv1.MachinePoolSpec{ Replicas: ptr.To[int32](1), }, }, @@ -1674,8 +1713,8 @@ func TestDiffASG(t *testing.T) { name: "SuspendProcesses != asg.SuspendProcesses", args: args{ machinePoolScope: &scope.MachinePoolScope{ - MachinePool: &expclusterv1.MachinePool{ - Spec: expclusterv1.MachinePoolSpec{ + MachinePool: &clusterv1.MachinePool{ + Spec: clusterv1.MachinePoolSpec{ Replicas: ptr.To[int32](1), }, }, @@ -1715,8 +1754,8 @@ func TestDiffASG(t *testing.T) { name: "all matches", args: args{ machinePoolScope: &scope.MachinePoolScope{ - MachinePool: &expclusterv1.MachinePool{ - Spec: expclusterv1.MachinePoolSpec{ + MachinePool: &clusterv1.MachinePool{ + Spec: clusterv1.MachinePoolSpec{ Replicas: ptr.To[int32](1), }, }, @@ -1753,13 +1792,13 @@ func TestDiffASG(t *testing.T) { name: "externally managed annotation ignores difference between desiredCapacity and replicas", args: args{ machinePoolScope: &scope.MachinePoolScope{ - MachinePool: &expclusterv1.MachinePool{ + MachinePool: &clusterv1.MachinePool{ ObjectMeta: metav1.ObjectMeta{ Annotations: map[string]string{ clusterv1.ReplicasManagedByAnnotation: "", // empty value counts as true (= externally managed) }, }, - Spec: expclusterv1.MachinePoolSpec{ + Spec: clusterv1.MachinePoolSpec{ Replicas: ptr.To[int32](0), }, }, @@ -1777,8 +1816,8 @@ func TestDiffASG(t *testing.T) { name: "without externally managed annotation ignores difference between desiredCapacity and replicas", args: args{ machinePoolScope: &scope.MachinePoolScope{ - MachinePool: &expclusterv1.MachinePool{ - Spec: expclusterv1.MachinePoolSpec{ + MachinePool: &clusterv1.MachinePool{ + Spec: clusterv1.MachinePoolSpec{ Replicas: ptr.To[int32](0), }, }, diff --git a/exp/controllers/awsmachinepool_machines.go b/exp/controllers/awsmachinepool_machines.go index 24c633df05..b38789d154 100644 --- a/exp/controllers/awsmachinepool_machines.go +++ b/exp/controllers/awsmachinepool_machines.go @@ -17,13 +17,12 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/feature" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/ec2" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - expclusterv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" + clusterv1 
"sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/labels/format" ) -func createAWSMachinesIfNotExists(ctx context.Context, awsMachineList *infrav1.AWSMachineList, mp *expclusterv1.MachinePool, infraMachinePoolMeta *metav1.ObjectMeta, infraMachinePoolType *metav1.TypeMeta, existingASG *expinfrav1.AutoScalingGroup, l logr.Logger, client client.Client, ec2Svc services.EC2Interface) error { +func createAWSMachinesIfNotExists(ctx context.Context, awsMachineList *infrav1.AWSMachineList, mp *clusterv1.MachinePool, infraMachinePoolMeta *metav1.ObjectMeta, infraMachinePoolType *metav1.TypeMeta, existingASG *expinfrav1.AutoScalingGroup, l logr.Logger, client client.Client, ec2Svc services.EC2Interface) error { if !feature.Gates.Enabled(feature.MachinePoolMachines) { return errors.New("createAWSMachinesIfNotExists must not be called unless the MachinePoolMachines feature gate is enabled") } @@ -161,7 +160,7 @@ func deleteOrphanedAWSMachines(ctx context.Context, awsMachineList *infrav1.AWSM return nil } -func getAWSMachines(ctx context.Context, mp *expclusterv1.MachinePool, kubeClient client.Client) (*infrav1.AWSMachineList, error) { +func getAWSMachines(ctx context.Context, mp *clusterv1.MachinePool, kubeClient client.Client) (*infrav1.AWSMachineList, error) { if !feature.Gates.Enabled(feature.MachinePoolMachines) { return nil, errors.New("getAWSMachines must not be called unless the MachinePoolMachines feature gate is enabled") } @@ -177,7 +176,7 @@ func getAWSMachines(ctx context.Context, mp *expclusterv1.MachinePool, kubeClien return awsMachineList, nil } -func reconcileDeleteAWSMachines(ctx context.Context, mp *expclusterv1.MachinePool, client client.Client, l logr.Logger) error { +func reconcileDeleteAWSMachines(ctx context.Context, mp *clusterv1.MachinePool, client client.Client, l logr.Logger) error { if !feature.Gates.Enabled(feature.MachinePoolMachines) { return errors.New("reconcileDeleteAWSMachines must not be called unless the MachinePoolMachines feature gate is enabled") } diff --git a/exp/controllers/awsmanagedmachinepool_controller.go b/exp/controllers/awsmanagedmachinepool_controller.go index b7e918f7aa..efd66cc5e0 100644 --- a/exp/controllers/awsmanagedmachinepool_controller.go +++ b/exp/controllers/awsmanagedmachinepool_controller.go @@ -20,6 +20,7 @@ import ( "context" "time" + autoscalingtypes "github.com/aws/aws-sdk-go-v2/service/autoscaling/types" "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" @@ -43,10 +44,10 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/eks" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger" "sigs.k8s.io/cluster-api-provider-aws/v2/util/paused" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - expclusterv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/cluster-api/util" - "sigs.k8s.io/cluster-api/util/conditions" + v1beta1conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" "sigs.k8s.io/cluster-api/util/predicates" ) @@ -75,7 +76,7 @@ func (r *AWSManagedMachinePoolReconciler) SetupWithManager(ctx context.Context, WithOptions(options). WithEventFilter(predicates.ResourceHasFilterLabel(mgr.GetScheme(), log.GetLogger(), r.WatchFilterValue)). Watches( - &expclusterv1.MachinePool{}, + &clusterv1.MachinePool{}, handler.EnqueueRequestsFromMapFunc(machinePoolToInfrastructureMapFunc(gvk)), ). 
Watches( @@ -151,7 +152,7 @@ func (r *AWSManagedMachinePoolReconciler) Reconcile(ctx context.Context, req ctr if !controlPlane.Status.Ready { log.Info("Control plane is not ready yet") - conditions.MarkFalse(awsPool, expinfrav1.EKSNodegroupReadyCondition, expinfrav1.WaitingForEKSControlPlaneReason, clusterv1.ConditionSeverityInfo, "") + v1beta1conditions.MarkFalse(awsPool, expinfrav1.EKSNodegroupReadyCondition, expinfrav1.WaitingForEKSControlPlaneReason, clusterv1beta1.ConditionSeverityInfo, "") return ctrl.Result{}, nil } @@ -173,13 +174,13 @@ func (r *AWSManagedMachinePoolReconciler) Reconcile(ctx context.Context, req ctr } defer func() { - applicableConditions := []clusterv1.ConditionType{ + applicableConditions := []clusterv1beta1.ConditionType{ expinfrav1.EKSNodegroupReadyCondition, expinfrav1.IAMNodegroupRolesReadyCondition, expinfrav1.LaunchTemplateReadyCondition, } - conditions.SetSummary(machinePoolScope.ManagedMachinePool, conditions.WithConditions(applicableConditions...), conditions.WithStepCounter()) + v1beta1conditions.SetSummary(machinePoolScope.ManagedMachinePool, v1beta1conditions.WithConditions(applicableConditions...), v1beta1conditions.WithStepCounter()) if err := machinePoolScope.Close(); err != nil && reterr == nil { reterr = err @@ -190,7 +191,7 @@ func (r *AWSManagedMachinePoolReconciler) Reconcile(ctx context.Context, req ctr return ctrl.Result{}, r.reconcileDelete(ctx, machinePoolScope, managedControlPlaneScope) } - return ctrl.Result{}, r.reconcileNormal(ctx, machinePoolScope, managedControlPlaneScope, managedControlPlaneScope) + return r.reconcileNormal(ctx, machinePoolScope, managedControlPlaneScope, managedControlPlaneScope) } func (r *AWSManagedMachinePoolReconciler) reconcileNormal( @@ -198,12 +199,12 @@ func (r *AWSManagedMachinePoolReconciler) reconcileNormal( machinePoolScope *scope.ManagedMachinePoolScope, ec2Scope scope.EC2Scope, s3Scope scope.S3Scope, -) error { +) (ctrl.Result, error) { machinePoolScope.Info("Reconciling AWSManagedMachinePool") if controllerutil.AddFinalizer(machinePoolScope.ManagedMachinePool, expinfrav1.ManagedMachinePoolFinalizer) { if err := machinePoolScope.PatchObject(); err != nil { - return err + return ctrl.Result{}, err } } @@ -212,18 +213,25 @@ func (r *AWSManagedMachinePoolReconciler) reconcileNormal( reconSvc := r.getReconcileService(ec2Scope) if machinePoolScope.ManagedMachinePool.Spec.AWSLaunchTemplate != nil { - canUpdateLaunchTemplate := func() (bool, error) { - return true, nil + canStartInstanceRefresh := func() (bool, *autoscalingtypes.InstanceRefreshStatus, error) { + return true, nil, nil + } + cancelInstanceRefresh := func() error { + return nil } runPostLaunchTemplateUpdateOperation := func() error { return nil } var objectStoreSvc services.ObjectStoreInterface // nil because no S3 bucket support for `AWSManagedControlPlane` yet - if err := reconSvc.ReconcileLaunchTemplate(ctx, machinePoolScope, machinePoolScope, s3Scope, ec2svc, objectStoreSvc, canUpdateLaunchTemplate, runPostLaunchTemplateUpdateOperation); err != nil { + res, err := reconSvc.ReconcileLaunchTemplate(ctx, machinePoolScope, machinePoolScope, s3Scope, ec2svc, objectStoreSvc, canStartInstanceRefresh, cancelInstanceRefresh, runPostLaunchTemplateUpdateOperation) + if err != nil { r.Recorder.Eventf(machinePoolScope.ManagedMachinePool, corev1.EventTypeWarning, "FailedLaunchTemplateReconcile", "Failed to reconcile launch template: %v", err) machinePoolScope.Error(err, "failed to reconcile launch template") - 
conditions.MarkFalse(machinePoolScope.ManagedMachinePool, expinfrav1.LaunchTemplateReadyCondition, expinfrav1.LaunchTemplateReconcileFailedReason, clusterv1.ConditionSeverityError, "") - return err + v1beta1conditions.MarkFalse(machinePoolScope.ManagedMachinePool, expinfrav1.LaunchTemplateReadyCondition, expinfrav1.LaunchTemplateReconcileFailedReason, clusterv1beta1.ConditionSeverityError, "") + return ctrl.Result{}, err + } + if res != nil { + return *res, nil } launchTemplateID := machinePoolScope.GetLaunchTemplateIDStatus() @@ -232,18 +240,18 @@ func (r *AWSManagedMachinePoolReconciler) reconcileNormal( ResourceService: ec2svc, }} if err := reconSvc.ReconcileTags(machinePoolScope, resourceServiceToUpdate); err != nil { - return errors.Wrap(err, "error updating tags") + return ctrl.Result{}, errors.Wrap(err, "error updating tags") } // set the LaunchTemplateReady condition - conditions.MarkTrue(machinePoolScope.ManagedMachinePool, expinfrav1.LaunchTemplateReadyCondition) + v1beta1conditions.MarkTrue(machinePoolScope.ManagedMachinePool, expinfrav1.LaunchTemplateReadyCondition) } if err := ekssvc.ReconcilePool(ctx); err != nil { - return errors.Wrapf(err, "failed to reconcile machine pool for AWSManagedMachinePool %s/%s", machinePoolScope.ManagedMachinePool.Namespace, machinePoolScope.ManagedMachinePool.Name) + return ctrl.Result{}, errors.Wrapf(err, "failed to reconcile machine pool for AWSManagedMachinePool %s/%s", machinePoolScope.ManagedMachinePool.Namespace, machinePoolScope.ManagedMachinePool.Name) } - return nil + return ctrl.Result{}, nil } func (r *AWSManagedMachinePoolReconciler) reconcileDelete( @@ -328,7 +336,7 @@ func managedControlPlaneToManagedMachinePoolMapFunc(c client.Client, gvk schema. return nil } - managedPoolForClusterList := expclusterv1.MachinePoolList{} + managedPoolForClusterList := clusterv1.MachinePoolList{} if err := c.List( ctx, &managedPoolForClusterList, client.InNamespace(clusterKey.Namespace), client.MatchingLabels{clusterv1.ClusterNameLabel: clusterKey.Name}, ); err != nil { diff --git a/exp/controllers/rosamachinepool_controller.go b/exp/controllers/rosamachinepool_controller.go index 9aebd92622..2666405de2 100644 --- a/exp/controllers/rosamachinepool_controller.go +++ b/exp/controllers/rosamachinepool_controller.go @@ -37,12 +37,12 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/rosa" "sigs.k8s.io/cluster-api-provider-aws/v2/util/paused" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - expclusterv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/annotations" - "sigs.k8s.io/cluster-api/util/conditions" - "sigs.k8s.io/cluster-api/util/patch" + v1beta1conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" + "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/patch" "sigs.k8s.io/cluster-api/util/predicates" ) @@ -71,7 +71,7 @@ func (r *ROSAMachinePoolReconciler) SetupWithManager(ctx context.Context, mgr ct WithOptions(options). WithEventFilter(predicates.ResourceHasFilterLabel(mgr.GetScheme(), log.GetLogger(), r.WatchFilterValue)). Watches( - &expclusterv1.MachinePool{}, + &clusterv1.MachinePool{}, handler.EnqueueRequestsFromMapFunc(machinePoolToInfrastructureMapFunc(gvk)), ). 
Watches( @@ -131,6 +131,13 @@ func (r *ROSAMachinePoolReconciler) Reconcile(ctx context.Context, req ctrl.Requ controlPlane := &rosacontrolplanev1.ROSAControlPlane{} if err := r.Client.Get(ctx, controlPlaneKey, controlPlane); err != nil { if apierrors.IsNotFound(err) && !rosaMachinePool.DeletionTimestamp.IsZero() { + // When the ROSAControlPlane is not found and the ROSAMachinePool CR is marked for deletion, + // it indicates that the ROSAControlPlane (and its associated NodePools) has already been deleted, + // while the ROSAMachinePool remains pending — since a ROSA-HCP cluster cannot exist without a NodePool. + // To handle this scenario, we trigger deletion of the ROSAControlPlane CR to initiate cleanup of the ROSA-HCP, + // relying on OCM to cascade-delete the related NodePools. + // Note: This state should rarely occur. However, during smoke tests, the ROSAMachinePool reconcile cycle + // may occasionally lag behind the deletion of the NodePools and ROSAControlPlane. log.Info("RosaControlPlane not found, RosaMachinePool is deleted") patchHelper, err := patch.NewHelper(rosaMachinePool, r.Client) if err != nil { @@ -138,8 +145,9 @@ func (r *ROSAMachinePoolReconciler) Reconcile(ctx context.Context, req ctrl.Requ } controllerutil.RemoveFinalizer(rosaMachinePool, expinfrav1.RosaMachinePoolFinalizer) - return ctrl.Result{}, patchHelper.Patch(ctx, rosaMachinePool, patch.WithOwnedConditions{Conditions: []clusterv1.ConditionType{ - expinfrav1.RosaMachinePoolReadyCondition}}) + return ctrl.Result{}, patchHelper.Patch(ctx, rosaMachinePool, patch.WithOwnedConditions{Conditions: []clusterv1beta1.ConditionType{ + expinfrav1.RosaMachinePoolReadyCondition, + }}) } log.Info("Failed to retrieve ControlPlane from MachinePool") @@ -177,7 +185,7 @@ func (r *ROSAMachinePoolReconciler) Reconcile(ctx context.Context, req ctrl.Requ } defer func() { - conditions.SetSummary(machinePoolScope.RosaMachinePool, conditions.WithConditions(expinfrav1.RosaMachinePoolReadyCondition), conditions.WithStepCounter()) + v1beta1conditions.SetSummary(machinePoolScope.RosaMachinePool, v1beta1conditions.WithConditions(expinfrav1.RosaMachinePoolReadyCondition), v1beta1conditions.WithStepCounter()) if err := machinePoolScope.Close(); err != nil && reterr == nil { reterr = err @@ -269,7 +277,7 @@ func (r *ROSAMachinePoolReconciler) reconcileNormal(ctx context.Context, rosaMachinePool.Status.Replicas = currentReplicas if rosa.IsNodePoolReady(nodePool) { - conditions.MarkTrue(rosaMachinePool, expinfrav1.RosaMachinePoolReadyCondition) + v1beta1conditions.MarkTrue(rosaMachinePool, expinfrav1.RosaMachinePoolReadyCondition) rosaMachinePool.Status.Ready = true if err := r.reconcileMachinePoolVersion(machinePoolScope, ocmClient, nodePool); err != nil { @@ -279,10 +287,10 @@ func (r *ROSAMachinePoolReconciler) reconcileNormal(ctx context.Context, return ctrl.Result{}, nil } - conditions.MarkFalse(rosaMachinePool, + v1beta1conditions.MarkFalse(rosaMachinePool, expinfrav1.RosaMachinePoolReadyCondition, nodePool.Status().Message(), - clusterv1.ConditionSeverityInfo, + clusterv1beta1.ConditionSeverityInfo, "") machinePoolScope.Info("waiting for NodePool to become ready", "state", nodePool.Status().Message()) @@ -298,10 +306,10 @@ func (r *ROSAMachinePoolReconciler) reconcileNormal(ctx context.Context, nodePool, err = ocmClient.CreateNodePool(machinePoolScope.ControlPlane.Status.ID, nodePoolSpec) if err != nil { - conditions.MarkFalse(rosaMachinePool, + v1beta1conditions.MarkFalse(rosaMachinePool, expinfrav1.RosaMachinePoolReadyCondition, 
expinfrav1.RosaMachinePoolReconciliationFailedReason, - clusterv1.ConditionSeverityError, + clusterv1beta1.ConditionSeverityError, "failed to create ROSAMachinePool: %s", err.Error()) return ctrl.Result{}, fmt.Errorf("failed to create nodepool: %w", err) } @@ -342,7 +350,7 @@ func (r *ROSAMachinePoolReconciler) reconcileMachinePoolVersion(machinePoolScope version := machinePoolScope.RosaMachinePool.Spec.Version if version == "" || version == rosa.RawVersionID(nodePool.Version()) { machinePoolScope.RosaMachinePool.Status.AvailableUpgrades = nodePool.Version().AvailableUpgrades() - conditions.MarkFalse(machinePoolScope.RosaMachinePool, expinfrav1.RosaMachinePoolUpgradingCondition, "upgraded", clusterv1.ConditionSeverityInfo, "") + v1beta1conditions.MarkFalse(machinePoolScope.RosaMachinePool, expinfrav1.RosaMachinePoolUpgradingCondition, "upgraded", clusterv1beta1.ConditionSeverityInfo, "") return nil } @@ -359,13 +367,13 @@ func (r *ROSAMachinePoolReconciler) reconcileMachinePoolVersion(machinePoolScope } } - condition := &clusterv1.Condition{ + condition := &clusterv1beta1.Condition{ Type: expinfrav1.RosaMachinePoolUpgradingCondition, Status: corev1.ConditionTrue, Reason: string(scheduledUpgrade.State().Value()), Message: fmt.Sprintf("Upgrading to version %s", scheduledUpgrade.Version()), } - conditions.Set(machinePoolScope.RosaMachinePool, condition) + v1beta1conditions.Set(machinePoolScope.RosaMachinePool, condition) // if nodePool is already upgrading to another version we need to wait until the current upgrade is finished, return an error to requeue and try later. if scheduledUpgrade.Version() != version { @@ -410,10 +418,10 @@ func (r *ROSAMachinePoolReconciler) updateNodePool(machinePoolScope *scope.RosaM updatedNodePool, err := ocmClient.UpdateNodePool(machinePoolScope.ControlPlane.Status.ID, nodePoolSpec) if err != nil { - conditions.MarkFalse(machinePoolScope.RosaMachinePool, + v1beta1conditions.MarkFalse(machinePoolScope.RosaMachinePool, expinfrav1.RosaMachinePoolReadyCondition, expinfrav1.RosaMachinePoolReconciliationFailedReason, - clusterv1.ConditionSeverityError, + clusterv1beta1.ConditionSeverityError, "failed to update ROSAMachinePool: %s", err.Error()) return nil, fmt.Errorf("failed to update nodePool: %w", err) } @@ -459,7 +467,7 @@ func validateMachinePoolSpec(machinePoolScope *scope.RosaMachinePoolScope) (*str return nil, nil } -func nodePoolBuilder(rosaMachinePoolSpec expinfrav1.RosaMachinePoolSpec, machinePoolSpec expclusterv1.MachinePoolSpec, controlPlaneChannelGroup rosacontrolplanev1.ChannelGroupType) *cmv1.NodePoolBuilder { +func nodePoolBuilder(rosaMachinePoolSpec expinfrav1.RosaMachinePoolSpec, machinePoolSpec clusterv1.MachinePoolSpec, controlPlaneChannelGroup rosacontrolplanev1.ChannelGroupType) *cmv1.NodePoolBuilder { npBuilder := cmv1.NewNodePool().ID(rosaMachinePoolSpec.NodePoolName). Labels(rosaMachinePoolSpec.Labels). 
AutoRepair(rosaMachinePoolSpec.AutoRepair) @@ -504,6 +512,10 @@ func nodePoolBuilder(rosaMachinePoolSpec expinfrav1.RosaMachinePoolSpec, machine if rosaMachinePoolSpec.VolumeSize > 75 { awsNodePool = awsNodePool.RootVolume(cmv1.NewAWSVolume().Size(rosaMachinePoolSpec.VolumeSize)) } + if rosaMachinePoolSpec.CapacityReservationID != "" { + capacityReservation := cmv1.NewAWSCapacityReservation().Id(rosaMachinePoolSpec.CapacityReservationID) + awsNodePool = awsNodePool.CapacityReservation(capacityReservation) + } npBuilder.AWSNodePool(awsNodePool) if rosaMachinePoolSpec.Version != "" { @@ -602,7 +614,7 @@ func rosaControlPlaneToRosaMachinePoolMapFunc(c client.Client, gvk schema.GroupV return nil } - managedPoolForClusterList := expclusterv1.MachinePoolList{} + managedPoolForClusterList := clusterv1.MachinePoolList{} if err := c.List( ctx, &managedPoolForClusterList, client.InNamespace(clusterKey.Namespace), client.MatchingLabels{clusterv1.ClusterNameLabel: clusterKey.Name}, ); err != nil { diff --git a/exp/controllers/rosamachinepool_controller_test.go b/exp/controllers/rosamachinepool_controller_test.go index 553cc38922..5e20aa170f 100644 --- a/exp/controllers/rosamachinepool_controller_test.go +++ b/exp/controllers/rosamachinepool_controller_test.go @@ -30,8 +30,8 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/rosa" "sigs.k8s.io/cluster-api-provider-aws/v2/test/mocks" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - expclusterv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/cluster-api/util/patch" ) @@ -70,9 +70,10 @@ func TestNodePoolToRosaMachinePoolSpec(t *testing.T) { Effect: corev1.TaintEffectNoExecute, }, }, + CapacityReservationID: "capacity-reservation-id", } - machinePoolSpec := expclusterv1.MachinePoolSpec{ + machinePoolSpec := clusterv1.MachinePoolSpec{ Replicas: ptr.To[int32](2), } @@ -113,7 +114,8 @@ func TestRosaMachinePoolReconcile(t *testing.T) { return &rosacontrolplanev1.ROSAControlPlane{ ObjectMeta: metav1.ObjectMeta{ Name: fmt.Sprintf("rosa-control-plane-%v", i), - Namespace: ns.Name}, + Namespace: ns.Name, + }, TypeMeta: metav1.TypeMeta{ Kind: "ROSAControlPlane", APIVersion: rosacontrolplanev1.GroupVersion.String(), @@ -127,10 +129,19 @@ func TestRosaMachinePoolReconcile(t *testing.T) { PodCIDR: "10.128.0.0/14", ServiceCIDR: "172.30.0.0/16", }, - Region: "us-east-1", - Version: "4.15.20", - ChannelGroup: "stable", - RolesRef: rosacontrolplanev1.AWSRolesRef{}, + Region: "us-east-1", + Version: "4.15.20", + ChannelGroup: "stable", + RolesRef: rosacontrolplanev1.AWSRolesRef{ + IngressARN: "op-arn1", + ImageRegistryARN: "op-arn2", + StorageARN: "op-arn3", + NetworkARN: "op-arn4", + KubeCloudControllerARN: "op-arn5", + NodePoolManagementARN: "op-arn6", + ControlPlaneOperatorARN: "op-arn7", + KMSProviderARN: "op-arn8", + }, OIDCID: "iodcid1", InstallerRoleARN: "arn1", WorkerRoleARN: "arn2", @@ -159,10 +170,10 @@ func TestRosaMachinePoolReconcile(t *testing.T) { Namespace: ns.Name, }, Spec: clusterv1.ClusterSpec{ - ControlPlaneRef: &corev1.ObjectReference{ - Name: rosaControlPlane(i).Name, - Kind: "ROSAControlPlane", - APIVersion: rosacontrolplanev1.GroupVersion.String(), + ControlPlaneRef: clusterv1.ContractVersionedObjectReference{ + Name: rosaControlPlane(i).Name, + Kind: "ROSAControlPlane", + APIGroup: rosacontrolplanev1.GroupVersion.Group, }, }, } @@ -188,8 +199,8 @@ func 
TestRosaMachinePoolReconcile(t *testing.T) { } } - ownerMachinePool := func(i int) *expclusterv1.MachinePool { - return &expclusterv1.MachinePool{ + ownerMachinePool := func(i int) *clusterv1.MachinePool { + return &clusterv1.MachinePool{ ObjectMeta: metav1.ObjectMeta{ Name: fmt.Sprintf("machinepool-%v", i), Namespace: ns.Name, @@ -200,17 +211,22 @@ func TestRosaMachinePoolReconcile(t *testing.T) { Kind: "MachinePool", APIVersion: clusterv1.GroupVersion.String(), }, - Spec: expclusterv1.MachinePoolSpec{ + Spec: clusterv1.MachinePoolSpec{ ClusterName: fmt.Sprintf("owner-cluster-%v", i), Template: clusterv1.MachineTemplateSpec{ Spec: clusterv1.MachineSpec{ ClusterName: fmt.Sprintf("owner-cluster-%v", i), - InfrastructureRef: corev1.ObjectReference{ - UID: rosaMachinePool(i).UID, - Name: rosaMachinePool(i).Name, - Namespace: ns.Namespace, - Kind: "ROSAMachinePool", - APIVersion: expclusterv1.GroupVersion.String(), + InfrastructureRef: clusterv1.ContractVersionedObjectReference{ + Name: rosaMachinePool(i).Name, + Kind: "ROSAMachinePool", + APIGroup: clusterv1.GroupVersion.Group, + }, + Bootstrap: clusterv1.Bootstrap{ + ConfigRef: clusterv1.ContractVersionedObjectReference{ + Name: fmt.Sprintf("%s-config", rosaMachinePool(i).Name), + Kind: "EKSConfig", + APIGroup: clusterv1.GroupVersion.Group, + }, }, }, }, @@ -222,7 +238,7 @@ func TestRosaMachinePoolReconcile(t *testing.T) { name string newROSAMachinePool *expinfrav1.ROSAMachinePool oldROSAMachinePool *expinfrav1.ROSAMachinePool - machinePool *expclusterv1.MachinePool + machinePool *clusterv1.MachinePool expect func(m *mocks.MockOCMClientMockRecorder) result reconcile.Result }{ @@ -350,7 +366,7 @@ func TestRosaMachinePoolReconcile(t *testing.T) { }, { name: "Create nodepool, replicas are set in MachinePool", - machinePool: &expclusterv1.MachinePool{ + machinePool: &clusterv1.MachinePool{ ObjectMeta: metav1.ObjectMeta{ Name: ownerMachinePool(3).Name, Namespace: ns.Name, @@ -361,18 +377,23 @@ func TestRosaMachinePoolReconcile(t *testing.T) { Kind: "MachinePool", APIVersion: clusterv1.GroupVersion.String(), }, - Spec: expclusterv1.MachinePoolSpec{ + Spec: clusterv1.MachinePoolSpec{ ClusterName: ownerCluster(3).Name, Replicas: ptr.To[int32](2), Template: clusterv1.MachineTemplateSpec{ Spec: clusterv1.MachineSpec{ ClusterName: ownerCluster(3).Name, - InfrastructureRef: corev1.ObjectReference{ - UID: rosaMachinePool(3).UID, - Name: rosaMachinePool(3).Name, - Namespace: ns.Namespace, - Kind: "ROSAMachinePool", - APIVersion: expclusterv1.GroupVersion.String(), + InfrastructureRef: clusterv1.ContractVersionedObjectReference{ + Name: rosaMachinePool(3).Name, + Kind: "ROSAMachinePool", + APIGroup: clusterv1.GroupVersion.Group, + }, + Bootstrap: clusterv1.Bootstrap{ + ConfigRef: clusterv1.ContractVersionedObjectReference{ + Name: fmt.Sprintf("%s-config", rosaMachinePool(3).Name), + Kind: "EKSConfig", + APIGroup: clusterv1.GroupVersion.Group, + }, }, }, }, @@ -412,7 +433,7 @@ func TestRosaMachinePoolReconcile(t *testing.T) { }, { name: "Update nodepool, replicas are updated from MachinePool", - machinePool: &expclusterv1.MachinePool{ + machinePool: &clusterv1.MachinePool{ ObjectMeta: metav1.ObjectMeta{ Name: ownerMachinePool(4).Name, Namespace: ns.Name, @@ -423,18 +444,23 @@ func TestRosaMachinePoolReconcile(t *testing.T) { Kind: "MachinePool", APIVersion: clusterv1.GroupVersion.String(), }, - Spec: expclusterv1.MachinePoolSpec{ + Spec: clusterv1.MachinePoolSpec{ ClusterName: ownerCluster(4).Name, Replicas: ptr.To[int32](2), Template: 
clusterv1.MachineTemplateSpec{ Spec: clusterv1.MachineSpec{ ClusterName: ownerCluster(4).Name, - InfrastructureRef: corev1.ObjectReference{ - UID: rosaMachinePool(4).UID, - Name: rosaMachinePool(4).Name, - Namespace: ns.Namespace, - Kind: "ROSAMachinePool", - APIVersion: expclusterv1.GroupVersion.String(), + InfrastructureRef: clusterv1.ContractVersionedObjectReference{ + Name: rosaMachinePool(4).Name, + Kind: "ROSAMachinePool", + APIGroup: clusterv1.GroupVersion.Group, + }, + Bootstrap: clusterv1.Bootstrap{ + ConfigRef: clusterv1.ContractVersionedObjectReference{ + Name: fmt.Sprintf("%s-config", rosaMachinePool(3).Name), + Kind: "EKSConfig", + APIGroup: clusterv1.GroupVersion.Group, + }, }, }, }, @@ -526,11 +552,13 @@ func TestRosaMachinePoolReconcile(t *testing.T) { // patch status conditions rmpPh, err := patch.NewHelper(test.oldROSAMachinePool, testEnv) - test.oldROSAMachinePool.Status.Conditions = clusterv1.Conditions{ + test.oldROSAMachinePool.Status.Conditions = clusterv1beta1.Conditions{ { - Type: "Paused", - Status: corev1.ConditionFalse, - Reason: "NotPaused", + Type: "Paused", + Status: corev1.ConditionFalse, + Reason: "NotPaused", + Message: "", + LastTransitionTime: metav1.NewTime(time.Now()), }, } diff --git a/exp/controllers/rosanetwork_controller.go b/exp/controllers/rosanetwork_controller.go new file mode 100644 index 0000000000..c84b360185 --- /dev/null +++ b/exp/controllers/rosanetwork_controller.go @@ -0,0 +1,320 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controllers + +import ( + "context" + "errors" + "fmt" + "maps" + "slices" + "strconv" + "strings" + "time" + + "github.com/aws/aws-sdk-go-v2/aws" + cloudformationtypes "github.com/aws/aws-sdk-go-v2/service/cloudformation/types" + "github.com/aws/smithy-go" + "github.com/go-logr/logr" + rosaCFNetwork "github.com/openshift/rosa/cmd/create/network" + rosaAWSClient "github.com/openshift/rosa/pkg/aws" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + + expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2" + "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope" + "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + v1beta1conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" + "sigs.k8s.io/cluster-api/util/predicates" +) + +// ROSANetworkReconciler reconciles a ROSANetwork object. 
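+// The awsClient and cfStack fields are populated during Reconcile: awsClient
+// is built lazily from the scope's AWS session, and cfStack caches the
+// DescribeStacks lookup for the stack named in the spec (nil while the stack
+// does not exist yet).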
+type ROSANetworkReconciler struct {
+	client.Client
+	Log              logr.Logger
+	Scheme           *runtime.Scheme
+	awsClient        rosaAWSClient.Client
+	cfStack          *cloudformationtypes.Stack
+	WatchFilterValue string
+}
+
+// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=rosanetworks,verbs=get;list;watch;create;update;patch;delete
+// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=rosanetworks/status,verbs=get;update;patch
+// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=rosanetworks/finalizers,verbs=update
+
+// Reconcile reconciles a ROSANetwork object.
+func (r *ROSANetworkReconciler) Reconcile(ctx context.Context, req ctrl.Request) (res ctrl.Result, reterr error) {
+	log := logger.FromContext(ctx)
+
+	// Get the ROSANetwork instance
+	rosaNetwork := &expinfrav1.ROSANetwork{}
+	if err := r.Client.Get(ctx, req.NamespacedName, rosaNetwork); err != nil {
+		if apierrors.IsNotFound(err) {
+			return ctrl.Result{}, nil
+		}
+		log.Error(err, "error getting ROSANetwork")
+		return ctrl.Result{Requeue: true}, nil
+	}
+
+	rosaNetworkScope, err := scope.NewROSANetworkScope(scope.ROSANetworkScopeParams{
+		Client:         r.Client,
+		ROSANetwork:    rosaNetwork,
+		ControllerName: "rosanetwork",
+		Logger:         log,
+	})
+	if err != nil {
+		return ctrl.Result{}, fmt.Errorf("failed to create rosanetwork scope: %w", err)
+	}
+
+	// Create a new AWS/CloudFormation client using the session cache
+	if r.awsClient == nil {
+		session := rosaNetworkScope.Session()
+		logger := rosaNetworkScope.Logger.GetLogger()
+		awsClient, err := rosaAWSClient.NewClient().
+			CapaLogger(&logger).
+			ExternalConfig(&session).
+			Build()
+		if err != nil {
+			return ctrl.Result{}, fmt.Errorf("failed to create AWS Client: %w", err)
+		}
+		r.awsClient = awsClient
+	}
+
+	// Try to fetch the CF stack with the given name
+	r.cfStack, err = r.awsClient.GetCFStack(ctx, rosaNetworkScope.ROSANetwork.Spec.StackName)
+	if err != nil {
+		var apiErr smithy.APIError // in case the stack does not exist, AWS returns ValidationError
+		if errors.As(err, &apiErr) && apiErr.ErrorCode() == "ValidationError" {
+			r.cfStack = nil
+		} else {
+			return ctrl.Result{}, fmt.Errorf("error fetching CF stack details: %w", err)
+		}
+	}
+
+	// Always close the scope
+	defer func() {
+		if err := rosaNetworkScope.PatchObject(); err != nil {
+			reterr = errors.Join(reterr, err)
+		}
+	}()
+
+	if !rosaNetwork.ObjectMeta.DeletionTimestamp.IsZero() {
+		// Handle deletion reconciliation loop.
+		return r.reconcileDelete(ctx, rosaNetworkScope)
+	}
+
+	// Handle normal reconciliation loop.
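+	// reconcileNormal drives the CloudFormation stack through a small state
+	// machine: no stack yet -> start creation and mark Creating;
+	// CREATE_IN_PROGRESS -> requeue after 60s; CREATE_COMPLETE -> parse the
+	// created subnets and mark the ROSANetwork ready; CREATE_FAILED -> error.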
+	return r.reconcileNormal(ctx, rosaNetworkScope)
+}
+
+func (r *ROSANetworkReconciler) reconcileNormal(ctx context.Context, rosaNetScope *scope.ROSANetworkScope) (res ctrl.Result, reterr error) {
+	rosaNetScope.Info("Reconciling ROSANetwork")
+
+	if controllerutil.AddFinalizer(rosaNetScope.ROSANetwork, expinfrav1.ROSANetworkFinalizer) {
+		if err := rosaNetScope.PatchObject(); err != nil {
+			return ctrl.Result{}, fmt.Errorf("failed to patch ROSANetwork: %w", err)
+		}
+	}
+
+	if r.cfStack == nil { // The CF stack does not exist yet
+		templateBody := string(rosaCFNetwork.CloudFormationTemplateFile)
+
+		zoneCount := 1
+		if rosaNetScope.ROSANetwork.Spec.AvailabilityZoneCount > 0 {
+			zoneCount = rosaNetScope.ROSANetwork.Spec.AvailabilityZoneCount
+		}
+		cfParams := map[string]string{
+			"AvailabilityZoneCount": strconv.Itoa(zoneCount),
+			"Region":                rosaNetScope.ROSANetwork.Spec.Region,
+			"Name":                  rosaNetScope.ROSANetwork.Spec.StackName,
+			"VpcCidr":               rosaNetScope.ROSANetwork.Spec.CIDRBlock,
+		}
+		// Explicitly specified AZs
+		for idx, zone := range rosaNetScope.ROSANetwork.Spec.AvailabilityZones {
+			cfParams[fmt.Sprintf("AZ%d", idx+1)] = zone
+		}
+
+		// Call the AWS CF stack create API
+		_, err := r.awsClient.CreateStackWithParamsTags(ctx, templateBody, rosaNetScope.ROSANetwork.Spec.StackName, cfParams, rosaNetScope.ROSANetwork.Spec.StackTags)
+		if err != nil {
+			v1beta1conditions.MarkFalse(rosaNetScope.ROSANetwork,
+				expinfrav1.ROSANetworkReadyCondition,
+				expinfrav1.ROSANetworkFailedReason,
+				clusterv1beta1.ConditionSeverityError,
+				"%s",
+				err.Error())
+			return ctrl.Result{}, fmt.Errorf("failed to start CF stack creation: %w", err)
+		}
+		v1beta1conditions.MarkFalse(rosaNetScope.ROSANetwork,
+			expinfrav1.ROSANetworkReadyCondition,
+			expinfrav1.ROSANetworkCreatingReason,
+			clusterv1beta1.ConditionSeverityInfo,
+			"")
+		return ctrl.Result{}, nil
+	}
+	// The cloudformation stack already exists
+	if err := r.updateROSANetworkResources(ctx, rosaNetScope.ROSANetwork); err != nil {
+		rosaNetScope.Error(err, "error fetching CF stack resources")
+		return ctrl.Result{RequeueAfter: time.Second * 60}, nil
+	}
+
+	switch r.cfStack.StackStatus {
+	case cloudformationtypes.StackStatusCreateInProgress: // Create in progress
+		// Set the reason of false ROSANetworkReadyCondition to Creating
+		v1beta1conditions.MarkFalse(rosaNetScope.ROSANetwork,
+			expinfrav1.ROSANetworkReadyCondition,
+			expinfrav1.ROSANetworkCreatingReason,
+			clusterv1beta1.ConditionSeverityInfo,
+			"")
+		return ctrl.Result{RequeueAfter: time.Second * 60}, nil
+	case cloudformationtypes.StackStatusCreateComplete: // Create complete
+		if err := r.parseSubnets(rosaNetScope.ROSANetwork); err != nil {
+			return ctrl.Result{}, fmt.Errorf("parsing stack subnets failed: %w", err)
+		}
+
+		// Set the reason of true ROSANetworkReadyCondition to Created
+		// We have to use v1beta1conditions.Set(), since v1beta1conditions.MarkTrue() does not support setting reason
+		v1beta1conditions.Set(rosaNetScope.ROSANetwork,
+			&clusterv1beta1.Condition{
+				Type:     expinfrav1.ROSANetworkReadyCondition,
+				Status:   corev1.ConditionTrue,
+				Reason:   expinfrav1.ROSANetworkCreatedReason,
+				Severity: clusterv1beta1.ConditionSeverityInfo,
+			})
+		return ctrl.Result{}, nil
+	case cloudformationtypes.StackStatusCreateFailed: // Create failed
+		// Set the reason of false ROSANetworkReadyCondition to Failed
+		v1beta1conditions.MarkFalse(rosaNetScope.ROSANetwork,
+			expinfrav1.ROSANetworkReadyCondition,
+			expinfrav1.ROSANetworkFailedReason,
+			clusterv1beta1.ConditionSeverityError,
+			"")
return ctrl.Result{}, fmt.Errorf("cloudformation stack %s creation failed, see the stack resources for more information", *r.cfStack.StackName) + } + + return ctrl.Result{}, nil +} + +func (r *ROSANetworkReconciler) reconcileDelete(ctx context.Context, rosaNetScope *scope.ROSANetworkScope) (res ctrl.Result, reterr error) { + rosaNetScope.Info("Reconciling ROSANetwork delete") + + if r.cfStack != nil { // The CF stack still exists + if err := r.updateROSANetworkResources(ctx, rosaNetScope.ROSANetwork); err != nil { + rosaNetScope.Info("error fetching CF stack resources: %w", err) + return ctrl.Result{RequeueAfter: time.Second * 60}, nil + } + + switch r.cfStack.StackStatus { + case cloudformationtypes.StackStatusDeleteInProgress: // Deletion in progress + return ctrl.Result{RequeueAfter: time.Second * 60}, nil + case cloudformationtypes.StackStatusDeleteFailed: // Deletion failed + v1beta1conditions.MarkFalse(rosaNetScope.ROSANetwork, + expinfrav1.ROSANetworkReadyCondition, + expinfrav1.ROSANetworkDeletionFailedReason, + clusterv1beta1.ConditionSeverityError, + "") + return ctrl.Result{}, fmt.Errorf("CF stack deletion failed") + default: // All the other states + err := r.awsClient.DeleteCFStack(ctx, rosaNetScope.ROSANetwork.Spec.StackName) + if err != nil { + v1beta1conditions.MarkFalse(rosaNetScope.ROSANetwork, + expinfrav1.ROSANetworkReadyCondition, + expinfrav1.ROSANetworkDeletionFailedReason, + clusterv1beta1.ConditionSeverityError, + "%s", + err.Error()) + return ctrl.Result{}, fmt.Errorf("failed to start CF stack deletion: %w", err) + } + v1beta1conditions.MarkFalse(rosaNetScope.ROSANetwork, + expinfrav1.ROSANetworkReadyCondition, + expinfrav1.ROSANetworkDeletingReason, + clusterv1beta1.ConditionSeverityInfo, + "") + return ctrl.Result{RequeueAfter: time.Second * 60}, nil + } + } else { + controllerutil.RemoveFinalizer(rosaNetScope.ROSANetwork, expinfrav1.ROSANetworkFinalizer) + } + + return ctrl.Result{}, nil +} + +func (r *ROSANetworkReconciler) updateROSANetworkResources(ctx context.Context, rosaNet *expinfrav1.ROSANetwork) error { + resources, err := r.awsClient.DescribeCFStackResources(ctx, rosaNet.Spec.StackName) + if err != nil { + return fmt.Errorf("error calling AWS DescribeStackResources(): %w", err) + } + + rosaNet.Status.Resources = make([]expinfrav1.CFResource, len(*resources)) + for i, resource := range *resources { + rosaNet.Status.Resources[i] = expinfrav1.CFResource{ + LogicalID: aws.ToString(resource.LogicalResourceId), + PhysicalID: aws.ToString(resource.PhysicalResourceId), + ResourceType: aws.ToString(resource.ResourceType), + Status: string(resource.ResourceStatus), + Reason: aws.ToString(resource.ResourceStatusReason), + } + } + + return nil +} + +func (r *ROSANetworkReconciler) parseSubnets(rosaNet *expinfrav1.ROSANetwork) error { + subnets := make(map[string]expinfrav1.ROSANetworkSubnet) + + for _, resource := range rosaNet.Status.Resources { + if resource.ResourceType != "AWS::EC2::Subnet" { + continue + } + + az, err := r.awsClient.GetSubnetAvailabilityZone(resource.PhysicalID) + if err != nil { + return fmt.Errorf("failed to get AZ for subnet %s: %w", resource.PhysicalID, err) + } + + subnet := subnets[az] + subnet.AvailabilityZone = az + + if strings.HasPrefix(resource.LogicalID, "SubnetPrivate") { + subnet.PrivateSubnet = resource.PhysicalID + } else { + subnet.PublicSubnet = resource.PhysicalID + } + + subnets[az] = subnet + } + + rosaNet.Status.Subnets = slices.Collect(maps.Values(subnets)) + + return nil +} + +// SetupWithManager is used to setup the 
controller. +func (r *ROSANetworkReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager, options controller.Options) error { + log := logger.FromContext(ctx) + return ctrl.NewControllerManagedBy(mgr). + WithOptions(options). + WithEventFilter(predicates.ResourceHasFilterLabel(mgr.GetScheme(), log.GetLogger(), r.WatchFilterValue)). + For(&expinfrav1.ROSANetwork{}). + Complete(r) +} diff --git a/exp/controllers/rosanetwork_controller_test.go b/exp/controllers/rosanetwork_controller_test.go new file mode 100644 index 0000000000..e421ddd112 --- /dev/null +++ b/exp/controllers/rosanetwork_controller_test.go @@ -0,0 +1,688 @@ +/* +Copyright The Kubernetes Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controllers + +import ( + "context" + "fmt" + "testing" + "time" + + awsSdk "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/cloudformation" + cloudformationtypes "github.com/aws/aws-sdk-go-v2/service/cloudformation/types" + "github.com/aws/aws-sdk-go-v2/service/ec2" + ec2Types "github.com/aws/aws-sdk-go-v2/service/ec2/types" + stsv2 "github.com/aws/aws-sdk-go-v2/service/sts" + "github.com/aws/smithy-go" + . "github.com/onsi/gomega" + rosaAWSClient "github.com/openshift/rosa/pkg/aws" + rosaMocks "github.com/openshift/rosa/pkg/aws/mocks" + "github.com/sirupsen/logrus" + gomock "go.uber.org/mock/gomock" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + + infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" + expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + v1beta1conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" +) + +func TestROSANetworkReconciler_Reconcile(t *testing.T) { + g := NewWithT(t) + ns, err := testEnv.CreateNamespace(ctx, "test-namespace") + g.Expect(err).ToNot(HaveOccurred()) + + mockCtrl := gomock.NewController(t) + ctx := context.TODO() + + identity := &infrav1.AWSClusterControllerIdentity{ + ObjectMeta: metav1.ObjectMeta{ + Name: "default", + }, + Spec: infrav1.AWSClusterControllerIdentitySpec{ + AWSClusterIdentitySpec: infrav1.AWSClusterIdentitySpec{ + AllowedNamespaces: &infrav1.AllowedNamespaces{}, + }, + }, + } + + name := "test-rosa-network" + rosaNetwork := &expinfrav1.ROSANetwork{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: ns.Name, + }, + Spec: expinfrav1.ROSANetworkSpec{ + StackName: name, + CIDRBlock: "10.0.0.0/8", + AvailabilityZoneCount: 1, + Region: "test-region", + IdentityRef: &infrav1.AWSIdentityReference{ + Name: identity.Name, + Kind: infrav1.ControllerIdentityKind, + }, + }, + } + + createObject(g, identity, ns.Name) + createObject(g, rosaNetwork, ns.Name) + + nameDeleted := "test-rosa-network-deleted" + rosaNetworkDeleted := &expinfrav1.ROSANetwork{ + ObjectMeta: metav1.ObjectMeta{ + Name: nameDeleted, + Namespace: ns.Name, 
+ }, + Spec: expinfrav1.ROSANetworkSpec{ + StackName: nameDeleted, + CIDRBlock: "10.0.0.0/8", + AvailabilityZoneCount: 1, + Region: "test-region", + IdentityRef: &infrav1.AWSIdentityReference{ + Name: identity.Name, + Kind: infrav1.ControllerIdentityKind, + }, + }, + } + controllerutil.AddFinalizer(rosaNetworkDeleted, expinfrav1.ROSANetworkFinalizer) + createObject(g, rosaNetworkDeleted, ns.Name) + err = deleteROSANetwork(ctx, rosaNetworkDeleted) + g.Expect(err).NotTo(HaveOccurred()) + + t.Run("Empty result when ROSANetwork object not found", func(t *testing.T) { + _, _, _, reconciler := createMockClients(mockCtrl) + + req := ctrl.Request{} + req.NamespacedName = types.NamespacedName{Name: "non-existent-object", Namespace: "non-existent-namespace"} + reqReconcile, errReconcile := reconciler.Reconcile(ctx, req) + + g.Expect(reqReconcile.RequeueAfter).To(Equal(time.Duration(0))) + g.Expect(errReconcile).ToNot(HaveOccurred()) + }) + + t.Run("Error result when CF stack GET returns error", func(t *testing.T) { + _, mockCFClient, mockSTSClient, reconciler := createMockClients(mockCtrl) + + mockSTSIdentity(mockSTSClient) + mockDescribeStacksCall(mockCFClient, &cloudformation.DescribeStacksOutput{}, fmt.Errorf("test-error"), 1) + + req := ctrl.Request{} + req.NamespacedName = types.NamespacedName{Name: rosaNetwork.Name, Namespace: rosaNetwork.Namespace} + reqReconcile, errReconcile := reconciler.Reconcile(ctx, req) + + g.Expect(reqReconcile.RequeueAfter).To(Equal(time.Duration(0))) + g.Expect(errReconcile).To(MatchError(ContainSubstring("error fetching CF stack details:"))) + }) + + t.Run("Initial CF stack creation fails", func(t *testing.T) { + _, mockCFClient, mockSTSClient, reconciler := createMockClients(mockCtrl) + + mockSTSIdentity(mockSTSClient) + + describeStacksOutput := &cloudformation.DescribeStacksOutput{} + validationErr := &smithy.GenericAPIError{ + Code: "ValidationError", + Message: "ValidationError", + Fault: smithy.FaultServer, + } + + mockDescribeStacksCall(mockCFClient, describeStacksOutput, validationErr, 1) + mockCreateStackCall(mockCFClient, &cloudformation.CreateStackOutput{}, fmt.Errorf("test-error"), 1) + + req := ctrl.Request{} + req.NamespacedName = types.NamespacedName{Name: rosaNetwork.Name, Namespace: rosaNetwork.Namespace} + reqReconcile, errReconcile := reconciler.Reconcile(ctx, req) + + g.Expect(reqReconcile.RequeueAfter).To(Equal(time.Duration(0))) + g.Expect(errReconcile).To(MatchError(ContainSubstring("failed to start CF stack creation:"))) + + cnd, err := getROSANetworkReadyCondition(reconciler, rosaNetwork) + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(cnd).ToNot(BeNil()) + g.Expect(cnd.Reason).To(Equal(expinfrav1.ROSANetworkFailedReason)) + g.Expect(cnd.Severity).To(Equal(clusterv1beta1.ConditionSeverityError)) + g.Expect(cnd.Message).To(Equal("test-error")) + }) + + t.Run("Initial CF stack creation succeeds", func(t *testing.T) { + _, mockCFClient, mockSTSClient, reconciler := createMockClients(mockCtrl) + + mockSTSIdentity(mockSTSClient) + + describeStacksOutput := &cloudformation.DescribeStacksOutput{} + validationErr := &smithy.GenericAPIError{ + Code: "ValidationError", + Message: "ValidationError", + Fault: smithy.FaultServer, + } + + mockDescribeStacksCall(mockCFClient, describeStacksOutput, validationErr, 1) + mockCreateStackCall(mockCFClient, &cloudformation.CreateStackOutput{}, nil, 1) + + req := ctrl.Request{} + req.NamespacedName = types.NamespacedName{Name: rosaNetwork.Name, Namespace: rosaNetwork.Namespace} + reqReconcile, errReconcile := 
reconciler.Reconcile(ctx, req) + + g.Expect(reqReconcile.RequeueAfter).To(Equal(time.Duration(0))) + g.Expect(errReconcile).ToNot(HaveOccurred()) + + cnd, err := getROSANetworkReadyCondition(reconciler, rosaNetwork) + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(cnd).ToNot(BeNil()) + g.Expect(cnd.Reason).To(Equal(expinfrav1.ROSANetworkCreatingReason)) + g.Expect(cnd.Severity).To(Equal(clusterv1beta1.ConditionSeverityInfo)) + }) + + t.Run("CF stack creation is in progress", func(t *testing.T) { + _, mockCFClient, mockSTSClient, reconciler := createMockClients(mockCtrl) + + mockSTSIdentity(mockSTSClient) + + describeStacksOutput := &cloudformation.DescribeStacksOutput{ + Stacks: []cloudformationtypes.Stack{ + { + StackName: &name, + StackStatus: cloudformationtypes.StackStatusCreateInProgress, + }, + }, + } + mockDescribeStacksCall(mockCFClient, describeStacksOutput, nil, 1) + + mockDescribeStackResourcesCall(mockCFClient, &cloudformation.DescribeStackResourcesOutput{}, nil, 1) + + req := ctrl.Request{} + req.NamespacedName = types.NamespacedName{Name: rosaNetwork.Name, Namespace: rosaNetwork.Namespace} + reqReconcile, errReconcile := reconciler.Reconcile(ctx, req) + + g.Expect(reqReconcile.RequeueAfter).To(Equal(time.Second * 60)) + g.Expect(errReconcile).ToNot(HaveOccurred()) + + cnd, err := getROSANetworkReadyCondition(reconciler, rosaNetwork) + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(cnd).ToNot(BeNil()) + g.Expect(cnd.Reason).To(Equal(expinfrav1.ROSANetworkCreatingReason)) + g.Expect(cnd.Severity).To(Equal(clusterv1beta1.ConditionSeverityInfo)) + }) + + t.Run("CF stack creation completed", func(t *testing.T) { + _, mockCFClient, mockSTSClient, reconciler := createMockClients(mockCtrl) + + mockSTSIdentity(mockSTSClient) + + describeStacksOutput := &cloudformation.DescribeStacksOutput{ + Stacks: []cloudformationtypes.Stack{ + { + StackName: &name, + StackStatus: cloudformationtypes.StackStatusCreateComplete, + }, + }, + } + mockDescribeStacksCall(mockCFClient, describeStacksOutput, nil, 1) + + mockDescribeStackResourcesCall(mockCFClient, &cloudformation.DescribeStackResourcesOutput{}, nil, 1) + + req := ctrl.Request{} + req.NamespacedName = types.NamespacedName{Name: rosaNetwork.Name, Namespace: rosaNetwork.Namespace} + reqReconcile, errReconcile := reconciler.Reconcile(ctx, req) + + g.Expect(reqReconcile.RequeueAfter).To(Equal(time.Duration(0))) + g.Expect(errReconcile).ToNot(HaveOccurred()) + + cnd, err := getROSANetworkReadyCondition(reconciler, rosaNetwork) + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(cnd).ToNot(BeNil()) + g.Expect(cnd.Reason).To(Equal(expinfrav1.ROSANetworkCreatedReason)) + g.Expect(cnd.Severity).To(Equal(clusterv1beta1.ConditionSeverityInfo)) + }) + + t.Run("CF stack creation failed", func(t *testing.T) { + _, mockCFClient, mockSTSClient, reconciler := createMockClients(mockCtrl) + + mockSTSIdentity(mockSTSClient) + + describeStacksOutput := &cloudformation.DescribeStacksOutput{ + Stacks: []cloudformationtypes.Stack{ + { + StackName: &name, + StackStatus: cloudformationtypes.StackStatusCreateFailed, + }, + }, + } + mockDescribeStacksCall(mockCFClient, describeStacksOutput, nil, 1) + + mockDescribeStackResourcesCall(mockCFClient, &cloudformation.DescribeStackResourcesOutput{}, nil, 1) + + req := ctrl.Request{} + req.NamespacedName = types.NamespacedName{Name: rosaNetwork.Name, Namespace: rosaNetwork.Namespace} + reqReconcile, errReconcile := reconciler.Reconcile(ctx, req) + + g.Expect(reqReconcile.RequeueAfter).To(Equal(time.Duration(0))) + 
g.Expect(errReconcile).To(MatchError(ContainSubstring("creation failed"))) + + cnd, err := getROSANetworkReadyCondition(reconciler, rosaNetwork) + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(cnd).ToNot(BeNil()) + g.Expect(cnd.Reason).To(Equal(expinfrav1.ROSANetworkFailedReason)) + g.Expect(cnd.Severity).To(Equal(clusterv1beta1.ConditionSeverityError)) + }) + + t.Run("CF stack deletion start failed", func(t *testing.T) { + _, mockCFClient, mockSTSClient, reconciler := createMockClients(mockCtrl) + + mockSTSIdentity(mockSTSClient) + + describeStacksOutput := &cloudformation.DescribeStacksOutput{ + Stacks: []cloudformationtypes.Stack{ + { + StackName: &nameDeleted, + StackStatus: cloudformationtypes.StackStatusCreateComplete, + }, + }, + } + mockDescribeStacksCall(mockCFClient, describeStacksOutput, nil, 1) + + mockDescribeStackResourcesCall(mockCFClient, &cloudformation.DescribeStackResourcesOutput{}, nil, 1) + + mockDeleteStackCall(mockCFClient, &cloudformation.DeleteStackOutput{}, fmt.Errorf("test-error"), 1) + + req := ctrl.Request{} + req.NamespacedName = types.NamespacedName{Name: nameDeleted, Namespace: rosaNetworkDeleted.Namespace} + reqReconcile, errReconcile := reconciler.Reconcile(ctx, req) + + g.Expect(reqReconcile.RequeueAfter).To(Equal(time.Duration(0))) + g.Expect(errReconcile).To(MatchError(ContainSubstring("failed to start CF stack deletion:"))) + + cnd, err := getROSANetworkReadyCondition(reconciler, rosaNetworkDeleted) + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(cnd).ToNot(BeNil()) + g.Expect(cnd.Reason).To(Equal(expinfrav1.ROSANetworkDeletionFailedReason)) + g.Expect(cnd.Severity).To(Equal(clusterv1beta1.ConditionSeverityError)) + }) + + t.Run("CF stack deletion start succeeded", func(t *testing.T) { + _, mockCFClient, mockSTSClient, reconciler := createMockClients(mockCtrl) + + mockSTSIdentity(mockSTSClient) + + describeStacksOutput := &cloudformation.DescribeStacksOutput{ + Stacks: []cloudformationtypes.Stack{ + { + StackName: &nameDeleted, + StackStatus: cloudformationtypes.StackStatusCreateComplete, + }, + }, + } + mockDescribeStacksCall(mockCFClient, describeStacksOutput, nil, 1) + + mockDescribeStackResourcesCall(mockCFClient, &cloudformation.DescribeStackResourcesOutput{}, nil, 1) + + mockDeleteStackCall(mockCFClient, &cloudformation.DeleteStackOutput{}, nil, 1) + + req := ctrl.Request{} + req.NamespacedName = types.NamespacedName{Name: nameDeleted, Namespace: rosaNetworkDeleted.Namespace} + reqReconcile, errReconcile := reconciler.Reconcile(ctx, req) + + g.Expect(reqReconcile.RequeueAfter).To(Equal(60 * time.Second)) + g.Expect(errReconcile).NotTo(HaveOccurred()) + + cnd, err := getROSANetworkReadyCondition(reconciler, rosaNetworkDeleted) + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(cnd).ToNot(BeNil()) + g.Expect(cnd.Reason).To(Equal(expinfrav1.ROSANetworkDeletingReason)) + g.Expect(cnd.Severity).To(Equal(clusterv1beta1.ConditionSeverityInfo)) + }) + + t.Run("CF stack deletion in progress", func(t *testing.T) { + _, mockCFClient, mockSTSClient, reconciler := createMockClients(mockCtrl) + + mockSTSIdentity(mockSTSClient) + + describeStacksOutput := &cloudformation.DescribeStacksOutput{ + Stacks: []cloudformationtypes.Stack{ + { + StackName: &nameDeleted, + StackStatus: cloudformationtypes.StackStatusDeleteInProgress, + }, + }, + } + mockDescribeStacksCall(mockCFClient, describeStacksOutput, nil, 1) + + mockDescribeStackResourcesCall(mockCFClient, &cloudformation.DescribeStackResourcesOutput{}, nil, 1) + + req := ctrl.Request{} + req.NamespacedName = 
types.NamespacedName{Name: nameDeleted, Namespace: rosaNetworkDeleted.Namespace} + reqReconcile, errReconcile := reconciler.Reconcile(ctx, req) + + g.Expect(reqReconcile.RequeueAfter).To(Equal(60 * time.Second)) + g.Expect(errReconcile).NotTo(HaveOccurred()) + }) + + t.Run("CF stack deletion failed", func(t *testing.T) { + _, mockCFClient, mockSTSClient, reconciler := createMockClients(mockCtrl) + + mockSTSIdentity(mockSTSClient) + + describeStacksOutput := &cloudformation.DescribeStacksOutput{ + Stacks: []cloudformationtypes.Stack{ + { + StackName: &nameDeleted, + StackStatus: cloudformationtypes.StackStatusDeleteFailed, + }, + }, + } + + mockDescribeStacksCall(mockCFClient, describeStacksOutput, nil, 1) + + describeStackResourcesOutput := &cloudformation.DescribeStackResourcesOutput{ + StackResources: []cloudformationtypes.StackResource{}, + } + + mockDescribeStackResourcesCall(mockCFClient, describeStackResourcesOutput, nil, 1) + + req := ctrl.Request{} + req.NamespacedName = types.NamespacedName{Name: nameDeleted, Namespace: rosaNetworkDeleted.Namespace} + reqReconcile, errReconcile := reconciler.Reconcile(ctx, req) + + g.Expect(reqReconcile.RequeueAfter).To(Equal(time.Duration(0))) + g.Expect(errReconcile).To(MatchError(ContainSubstring("CF stack deletion failed"))) + + cnd, err := getROSANetworkReadyCondition(reconciler, rosaNetworkDeleted) + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(cnd).ToNot(BeNil()) + g.Expect(cnd.Reason).To(Equal(expinfrav1.ROSANetworkDeletionFailedReason)) + g.Expect(cnd.Severity).To(Equal(clusterv1beta1.ConditionSeverityError)) + }) + + cleanupObject(g, rosaNetwork) + cleanupObject(g, rosaNetworkDeleted) + cleanupObject(g, identity) +} + +func TestROSANetworkReconciler_updateROSANetworkResources(t *testing.T) { + g := NewWithT(t) + mockCtrl := gomock.NewController(t) + ctx := context.TODO() + + rosaNetwork := &expinfrav1.ROSANetwork{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-rosa-network", + Namespace: "test-namespace", + }, + Spec: expinfrav1.ROSANetworkSpec{}, + Status: expinfrav1.ROSANetworkStatus{}, + } + + t.Run("Handle cloudformation client error", func(t *testing.T) { + _, mockCFClient, _, reconciler := createMockClients(mockCtrl) + + mockDescribeStackResourcesCall(mockCFClient, &cloudformation.DescribeStackResourcesOutput{}, fmt.Errorf("test-error"), 1) + + err := reconciler.updateROSANetworkResources(ctx, rosaNetwork) + g.Expect(err).To(HaveOccurred()) + g.Expect(len(rosaNetwork.Status.Resources)).To(Equal(0)) + }) + + t.Run("Update ROSANetwork.Status.Resources", func(t *testing.T) { + _, mockCFClient, _, reconciler := createMockClients(mockCtrl) + + logicalResourceID := "logical-resource-id" + resourceStatus := cloudformationtypes.ResourceStatusCreateComplete + resourceType := "resource-type" + resourceStatusReason := "resource-status-reason" + physicalResourceID := "physical-resource-id" + + describeStackResourcesOutput := &cloudformation.DescribeStackResourcesOutput{ + StackResources: []cloudformationtypes.StackResource{ + { + LogicalResourceId: &logicalResourceID, + ResourceStatus: resourceStatus, + ResourceType: &resourceType, + ResourceStatusReason: &resourceStatusReason, + PhysicalResourceId: &physicalResourceID, + }, + }, + } + + mockDescribeStackResourcesCall(mockCFClient, describeStackResourcesOutput, nil, 1) + + err := reconciler.updateROSANetworkResources(ctx, rosaNetwork) + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(rosaNetwork.Status.Resources[0].LogicalID).To(Equal(logicalResourceID)) + 
g.Expect(rosaNetwork.Status.Resources[0].Status).To(Equal(string(resourceStatus)))
+		g.Expect(rosaNetwork.Status.Resources[0].ResourceType).To(Equal(resourceType))
+		g.Expect(rosaNetwork.Status.Resources[0].Reason).To(Equal(resourceStatusReason))
+		g.Expect(rosaNetwork.Status.Resources[0].PhysicalID).To(Equal(physicalResourceID))
+	})
+}
+
+func TestROSANetworkReconciler_parseSubnets(t *testing.T) {
+	g := NewWithT(t)
+	mockCtrl := gomock.NewController(t)
+
+	subnet1Id := "subnet1-physical-id"
+	subnet2Id := "subnet2-physical-id"
+
+	rosaNetwork := &expinfrav1.ROSANetwork{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      "test-rosa-network",
+			Namespace: "test-namespace",
+		},
+		Spec: expinfrav1.ROSANetworkSpec{},
+		Status: expinfrav1.ROSANetworkStatus{
+			Resources: []expinfrav1.CFResource{
+				{
+					ResourceType: "AWS::EC2::Subnet",
+					LogicalID:    "SubnetPrivate",
+					PhysicalID:   subnet1Id,
+					Status:       "subnet1-status",
+					Reason:       "subnet1-reason",
+				},
+				{
+					ResourceType: "AWS::EC2::Subnet",
+					LogicalID:    "SubnetPublic",
+					PhysicalID:   subnet2Id,
+					Status:       "subnet2-status",
+					Reason:       "subnet2-reason",
+				},
+				{
+					ResourceType: "bogus-type",
+					LogicalID:    "bogus-logical-id",
+					PhysicalID:   "bogus-physical-id",
+					Status:       "bogus-status",
+					Reason:       "bogus-reason",
+				},
+			},
+		},
+	}
+
+	t.Run("Handle EC2 client error", func(t *testing.T) {
+		mockEC2Client, _, _, reconciler := createMockClients(mockCtrl)
+
+		mockDescribeSubnetsCall(mockEC2Client, &ec2.DescribeSubnetsOutput{}, nil, 1)
+
+		err := reconciler.parseSubnets(rosaNetwork)
+		g.Expect(err).To(HaveOccurred())
+		g.Expect(len(rosaNetwork.Status.Subnets)).To(Equal(0))
+	})
+
+	t.Run("Update ROSANetwork.Status.Subnets", func(t *testing.T) {
+		mockEC2Client, _, _, reconciler := createMockClients(mockCtrl)
+
+		az := "az01"
+
+		describeSubnetsOutput := &ec2.DescribeSubnetsOutput{
+			Subnets: []ec2Types.Subnet{
+				{
+					AvailabilityZone: &az,
+				},
+			},
+		}
+
+		mockDescribeSubnetsCall(mockEC2Client, describeSubnetsOutput, nil, 2)
+
+		err := reconciler.parseSubnets(rosaNetwork)
+		g.Expect(err).ToNot(HaveOccurred())
+		g.Expect(rosaNetwork.Status.Subnets[0].AvailabilityZone).To(Equal(az))
+		g.Expect(rosaNetwork.Status.Subnets[0].PrivateSubnet).To(Equal(subnet1Id))
+		g.Expect(rosaNetwork.Status.Subnets[0].PublicSubnet).To(Equal(subnet2Id))
+	})
+}
+
+func createMockClients(mockCtrl *gomock.Controller) (*rosaMocks.MockEc2ApiClient, *rosaMocks.MockCloudFormationApiClient, *rosaMocks.MockStsApiClient, *ROSANetworkReconciler) {
+	mockEC2Client := rosaMocks.NewMockEc2ApiClient(mockCtrl)
+	mockCFClient := rosaMocks.NewMockCloudFormationApiClient(mockCtrl)
+	mockSTSClient := rosaMocks.NewMockStsApiClient(mockCtrl)
+	awsClient := rosaAWSClient.New(
+		awsSdk.Config{},
+		rosaAWSClient.NewLoggerWrapper(logrus.New(), nil),
+		rosaMocks.NewMockIamApiClient(mockCtrl),
+		mockEC2Client,
+		rosaMocks.NewMockOrganizationsApiClient(mockCtrl),
+		rosaMocks.NewMockS3ApiClient(mockCtrl),
+		rosaMocks.NewMockSecretsManagerApiClient(mockCtrl),
+		mockSTSClient,
+		mockCFClient,
+		rosaMocks.NewMockServiceQuotasApiClient(mockCtrl),
+		rosaMocks.NewMockServiceQuotasApiClient(mockCtrl),
+		&rosaAWSClient.AccessKey{},
+		false,
+	)
+
+	reconciler := &ROSANetworkReconciler{
+		Client:    testEnv.Client,
+		awsClient: awsClient,
+	}
+
+	return mockEC2Client, mockCFClient, mockSTSClient, reconciler
+}
+
+func mockSTSIdentity(mockSTSClient *rosaMocks.MockStsApiClient) {
+	getCallerIdentityResult := &stsv2.GetCallerIdentityOutput{
+		Account: awsSdk.String("foo"),
+		Arn:     awsSdk.String("arn:aws:iam::123456789012:rosa/foo"),
+	
} + mockSTSClient. + EXPECT(). + GetCallerIdentity(gomock.Any(), gomock.Any()). + Return(getCallerIdentityResult, nil). + AnyTimes() +} + +func mockDescribeStacksCall(mockCFClient *rosaMocks.MockCloudFormationApiClient, output *cloudformation.DescribeStacksOutput, err error, times int) { + mockCFClient. + EXPECT(). + DescribeStacks(gomock.Any(), gomock.Any(), gomock.Any()). + DoAndReturn(func(_ context.Context, + _ *cloudformation.DescribeStacksInput, + _ ...func(*cloudformation.Options), + ) (*cloudformation.DescribeStacksOutput, error) { + return output, err + }). + Times(times) +} + +func mockCreateStackCall(mockCFClient *rosaMocks.MockCloudFormationApiClient, output *cloudformation.CreateStackOutput, err error, times int) { + mockCFClient. + EXPECT(). + CreateStack(gomock.Any(), gomock.Any(), gomock.Any()). + DoAndReturn(func(_ context.Context, + _ *cloudformation.CreateStackInput, + _ ...func(*cloudformation.Options), + ) (*cloudformation.CreateStackOutput, error) { + return output, err + }). + Times(times) +} + +func mockDescribeStackResourcesCall(mockCFClient *rosaMocks.MockCloudFormationApiClient, output *cloudformation.DescribeStackResourcesOutput, err error, times int) { + mockCFClient. + EXPECT(). + DescribeStackResources(gomock.Any(), gomock.Any(), gomock.Any()). + DoAndReturn(func(_ context.Context, + _ *cloudformation.DescribeStackResourcesInput, + _ ...func(*cloudformation.Options), + ) (*cloudformation.DescribeStackResourcesOutput, error) { + return output, err + }). + Times(times) +} + +func mockDeleteStackCall(mockCFClient *rosaMocks.MockCloudFormationApiClient, output *cloudformation.DeleteStackOutput, err error, times int) { + mockCFClient. + EXPECT(). + DeleteStack(gomock.Any(), gomock.Any(), gomock.Any()). + DoAndReturn(func(_ context.Context, + _ *cloudformation.DeleteStackInput, + _ ...func(*cloudformation.Options), + ) (*cloudformation.DeleteStackOutput, error) { + return output, err + }). + Times(times) +} + +func mockDescribeSubnetsCall(mockEc2Client *rosaMocks.MockEc2ApiClient, output *ec2.DescribeSubnetsOutput, err error, times int) { + mockEc2Client. + EXPECT(). + DescribeSubnets(gomock.Any(), gomock.Any(), gomock.Any()). + DoAndReturn(func(_ context.Context, + _ *ec2.DescribeSubnetsInput, + _ ...func(*ec2.Options), + ) (*ec2.DescribeSubnetsOutput, error) { + return output, err + }). 
+ Times(times) +} + +func deleteROSANetwork(ctx context.Context, rosaNetwork *expinfrav1.ROSANetwork) error { + if err := testEnv.Client.Get(ctx, client.ObjectKeyFromObject(rosaNetwork), rosaNetwork); err != nil { + return err + } + + if !rosaNetwork.ObjectMeta.DeletionTimestamp.IsZero() { + return nil + } + + if err := testEnv.Client.Delete(ctx, rosaNetwork); err != nil { + return err + } + + for { + if err := testEnv.Client.Get(ctx, client.ObjectKeyFromObject(rosaNetwork), rosaNetwork); err != nil { + return err + } + + if !rosaNetwork.ObjectMeta.DeletionTimestamp.IsZero() { + break + } + + time.Sleep(50 * time.Millisecond) + } + + return nil +} + +func getROSANetworkReadyCondition(reconciler *ROSANetworkReconciler, rosaNet *expinfrav1.ROSANetwork) (*clusterv1beta1.Condition, error) { + updatedROSANetwork := &expinfrav1.ROSANetwork{} + + if err := reconciler.Client.Get(ctx, client.ObjectKeyFromObject(rosaNet), updatedROSANetwork); err != nil { + return nil, err + } + + return v1beta1conditions.Get(updatedROSANetwork, expinfrav1.ROSANetworkReadyCondition), nil +} diff --git a/exp/controllers/rosaroleconfig_controller.go b/exp/controllers/rosaroleconfig_controller.go new file mode 100644 index 0000000000..01ff345d4d --- /dev/null +++ b/exp/controllers/rosaroleconfig_controller.go @@ -0,0 +1,482 @@ +/* +Copyright 2025 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package controllers + +import ( + "context" + "errors" + "fmt" + "maps" + "strings" + + accountroles "github.com/openshift/rosa/cmd/create/accountroles" + oidcconfig "github.com/openshift/rosa/cmd/create/oidcconfig" + oidcprovider "github.com/openshift/rosa/cmd/create/oidcprovider" + operatorroles "github.com/openshift/rosa/cmd/create/operatorroles" + "github.com/openshift/rosa/pkg/aws" + interactive "github.com/openshift/rosa/pkg/interactive" + rosalogging "github.com/openshift/rosa/pkg/logging" + "github.com/openshift/rosa/pkg/ocm" + "github.com/openshift/rosa/pkg/reporter" + rosacli "github.com/openshift/rosa/pkg/rosa" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + kerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/client-go/tools/record" + "k8s.io/klog/v2" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + + "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/rosa/api/v1beta2" + expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2" + "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud" + "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope" + stsiface "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/sts" + "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger" + "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/rosa" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + v1beta1conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" + "sigs.k8s.io/cluster-api/util/predicates" +) + +// ROSARoleConfigReconciler reconciles a ROSARoleConfig object. +type ROSARoleConfigReconciler struct { + client.Client + Recorder record.EventRecorder + WatchFilterValue string + NewStsClient func(cloud.ScopeUsage, cloud.Session, logger.Wrapper, runtime.Object) stsiface.STSClient + NewOCMClient func(ctx context.Context, scope rosa.OCMSecretsRetriever) (rosa.OCMClient, error) + Runtime *rosacli.Runtime +} + +func (r *ROSARoleConfigReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager, options controller.Options) error { + log := logger.FromContext(ctx) + r.NewOCMClient = rosa.NewWrappedOCMClientWithoutControlPlane + r.NewStsClient = scope.NewSTSClient + + return ctrl.NewControllerManagedBy(mgr). + For(&expinfrav1.ROSARoleConfig{}). + WithOptions(options). + WithEventFilter(predicates.ResourceHasFilterLabel(mgr.GetScheme(), log.GetLogger(), r.WatchFilterValue)). + Complete(r) +} + +// +kubebuilder:rbac:groups=core,resources=events,verbs=get;list;watch;create;patch +// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=rosaroleconfigs,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=rosaroleconfigs/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=rosaroleconfigs/finalizers,verbs=update + +// Reconcile reconciles ROSARoleConfig. 
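+// It creates, in order, the account roles, the OIDC config and provider,
+// and the operator roles, marking RosaRoleConfigReadyCondition once all of
+// them are in place; a summary condition is set and the object is patched
+// in a deferred call on every return path.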
+func (r *ROSARoleConfigReconciler) Reconcile(ctx context.Context, req ctrl.Request) (res ctrl.Result, reterr error) {
+	log := logger.FromContext(ctx)
+
+	roleConfig := &expinfrav1.ROSARoleConfig{}
+	if err := r.Get(ctx, req.NamespacedName, roleConfig); err != nil {
+		if apierrors.IsNotFound(err) {
+			return ctrl.Result{}, nil
+		}
+		log.Error(err, "Failed to get ROSARoleConfig")
+		return ctrl.Result{Requeue: true}, nil
+	}
+
+	log = log.WithValues("roleConfig", klog.KObj(roleConfig))
+	scope, err := scope.NewRosaRoleConfigScope(scope.RosaRoleConfigScopeParams{
+		Client:         r.Client,
+		RosaRoleConfig: roleConfig,
+		ControllerName: "rosaroleconfig",
+		Logger:         log,
+	})
+	if err != nil {
+		return ctrl.Result{}, fmt.Errorf("failed to create rosaroleconfig scope: %w", err)
+	}
+
+	// Always close the scope and set summary condition
+	defer func() {
+		v1beta1conditions.SetSummary(scope.RosaRoleConfig, v1beta1conditions.WithConditions(expinfrav1.RosaRoleConfigReadyCondition), v1beta1conditions.WithStepCounter())
+		if err := scope.PatchObject(); err != nil {
+			reterr = errors.Join(reterr, err)
+		}
+	}()
+
+	if err := r.setUpRuntime(ctx, scope); err != nil {
+		return ctrl.Result{}, fmt.Errorf("failed to set up runtime: %w", err)
+	}
+
+	if !roleConfig.DeletionTimestamp.IsZero() {
+		scope.Info("Deleting ROSARoleConfig.")
+		v1beta1conditions.MarkFalse(scope.RosaRoleConfig, expinfrav1.RosaRoleConfigReadyCondition, expinfrav1.RosaRoleConfigDeletionStarted, clusterv1beta1.ConditionSeverityInfo, "Deletion of ROSARoleConfig started")
+		err = r.reconcileDelete(scope)
+		if err == nil {
+			controllerutil.RemoveFinalizer(scope.RosaRoleConfig, expinfrav1.RosaRoleConfigFinalizer)
+		}
+
+		return ctrl.Result{}, err
+	}
+
+	if controllerutil.AddFinalizer(scope.RosaRoleConfig, expinfrav1.RosaRoleConfigFinalizer) {
+		return ctrl.Result{}, nil
+	}
+
+	if err := r.reconcileAccountRoles(scope); err != nil {
+		v1beta1conditions.MarkFalse(scope.RosaRoleConfig, expinfrav1.RosaRoleConfigReadyCondition, expinfrav1.RosaRoleConfigReconciliationFailedReason, clusterv1beta1.ConditionSeverityError, "Account Roles failure: %v", err)
+		return ctrl.Result{}, fmt.Errorf("account roles: %w", err)
+	}
+
+	if err := r.reconcileOIDC(scope); err != nil {
+		v1beta1conditions.MarkFalse(scope.RosaRoleConfig, expinfrav1.RosaRoleConfigReadyCondition, expinfrav1.RosaRoleConfigReconciliationFailedReason, clusterv1beta1.ConditionSeverityError, "OIDC Config/provider failure: %v", err)
+		return ctrl.Result{}, fmt.Errorf("oidc config: %w", err)
+	}
+
+	if err := r.reconcileOperatorRoles(scope); err != nil {
+		v1beta1conditions.MarkFalse(scope.RosaRoleConfig, expinfrav1.RosaRoleConfigReadyCondition, expinfrav1.RosaRoleConfigReconciliationFailedReason, clusterv1beta1.ConditionSeverityError, "Operator Roles failure: %v", err)
+		return ctrl.Result{}, fmt.Errorf("operator roles: %w", err)
+	}
+
+	if r.rosaRolesConfigReady(scope.RosaRoleConfig) {
+		v1beta1conditions.Set(scope.RosaRoleConfig,
+			&clusterv1beta1.Condition{
+				Type:     expinfrav1.RosaRoleConfigReadyCondition,
+				Status:   corev1.ConditionTrue,
+				Reason:   expinfrav1.RosaRoleConfigCreatedReason,
+				Severity: clusterv1beta1.ConditionSeverityInfo,
+				Message:  "RosaRoleConfig is ready",
+			})
+	} else {
+		v1beta1conditions.Set(scope.RosaRoleConfig,
+			&clusterv1beta1.Condition{
+				Type:     expinfrav1.RosaRoleConfigReadyCondition,
+				Status:   corev1.ConditionFalse,
+				Reason:   expinfrav1.RosaRoleConfigCreatedReason,
+				Severity: clusterv1beta1.ConditionSeverityInfo,
+				Message:  "RosaRoleConfig not ready",
+			})
+	}
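+
+	// The Ready condition set above is rolled into the summary condition and
+	// persisted by the deferred SetSummary/PatchObject call.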
return ctrl.Result{}, nil +} + +func (r *ROSARoleConfigReconciler) reconcileDelete(scope *scope.RosaRoleConfigScope) error { + if err := r.deleteOperatorRoles(scope); err != nil { + v1beta1conditions.MarkFalse(scope.RosaRoleConfig, expinfrav1.RosaRoleConfigReadyCondition, expinfrav1.RosaRoleConfigDeletionFailedReason, clusterv1beta1.ConditionSeverityError, "Failed to delete operator roles: %v", err) + return err + } + + if err := r.deleteOIDC(scope); err != nil { + v1beta1conditions.MarkFalse(scope.RosaRoleConfig, expinfrav1.RosaRoleConfigReadyCondition, expinfrav1.RosaRoleConfigDeletionFailedReason, clusterv1beta1.ConditionSeverityError, "Failed to delete OIDC provider: %v", err) + return err + } + + if err := r.deleteAccountRoles(scope); err != nil { + v1beta1conditions.MarkFalse(scope.RosaRoleConfig, expinfrav1.RosaRoleConfigReadyCondition, expinfrav1.RosaRoleConfigDeletionFailedReason, clusterv1beta1.ConditionSeverityError, "Failed to delete account roles: %v", err) + return err + } + + return nil +} + +func (r *ROSARoleConfigReconciler) reconcileOperatorRoles(scope *scope.RosaRoleConfigScope) error { + operatorRoles, err := r.Runtime.AWSClient.ListOperatorRoles("", "", scope.RosaRoleConfig.Spec.OperatorRoleConfig.Prefix) + if err != nil { + return err + } + + operatorRolesRef := v1beta2.AWSRolesRef{} + for _, role := range operatorRoles[scope.RosaRoleConfig.Spec.OperatorRoleConfig.Prefix] { + if strings.Contains(role.RoleName, expinfrav1.IngressOperatorARNSuffix) { + operatorRolesRef.IngressARN = role.RoleARN + } else if strings.Contains(role.RoleName, expinfrav1.ImageRegistryARNSuffix) { + operatorRolesRef.ImageRegistryARN = role.RoleARN + } else if strings.Contains(role.RoleName, expinfrav1.StorageARNSuffix) { + operatorRolesRef.StorageARN = role.RoleARN + } else if strings.Contains(role.RoleName, expinfrav1.NetworkARNSuffix) { + operatorRolesRef.NetworkARN = role.RoleARN + } else if strings.Contains(role.RoleName, expinfrav1.KubeCloudControllerARNSuffix) { + operatorRolesRef.KubeCloudControllerARN = role.RoleARN + } else if strings.Contains(role.RoleName, expinfrav1.NodePoolManagementARNSuffix) { + operatorRolesRef.NodePoolManagementARN = role.RoleARN + } else if strings.Contains(role.RoleName, expinfrav1.ControlPlaneOperatorARNSuffix) { + operatorRolesRef.ControlPlaneOperatorARN = role.RoleARN + } else if strings.Contains(role.RoleName, expinfrav1.KMSProviderARNSuffix) { + operatorRolesRef.KMSProviderARN = role.RoleARN + } + } + + if r.operatorRolesReady(operatorRolesRef) { + scope.RosaRoleConfig.Status.OperatorRolesRef = operatorRolesRef + return nil + } + + installerRoleArn := scope.RosaRoleConfig.Status.AccountRolesRef.InstallerRoleARN + if installerRoleArn == "" { + scope.Logger.Info("installerRoleARN is empty, waiting for installer role to be created.") + return nil + } + oidcConfigID := scope.RosaRoleConfig.Status.OIDCID + if oidcConfigID == "" { + scope.Logger.Info("oidcID is empty, waiting for oidcConfig to be created.") + return nil + } + + policies, err := r.Runtime.OCMClient.GetPolicies("OperatorRole") + if err != nil { + return err + } + + // create operator roles + config := scope.RosaRoleConfig.Spec.OperatorRoleConfig + return operatorroles.CreateOperatorRoles(r.Runtime, rosa.GetOCMClientEnv(r.Runtime.OCMClient), config.PermissionsBoundaryARN, + interactive.ModeAuto, policies, "", config.SharedVPCConfig.IsSharedVPC(), config.Prefix, true, installerRoleArn, + true, oidcConfigID, config.SharedVPCConfig.RouteRoleARN, ocm.DefaultChannelGroup, + 
config.SharedVPCConfig.VPCEndpointRoleARN)
+}
+
+func (r *ROSARoleConfigReconciler) reconcileOIDC(scope *scope.RosaRoleConfigScope) error {
+	oidcID := ""
+	switch scope.RosaRoleConfig.Spec.OidcProviderType {
+	case expinfrav1.Managed:
+		// Create the OIDC config if it does not exist yet.
+		if scope.RosaRoleConfig.Status.OIDCID == "" {
+			createdID, createErr := oidcconfig.CreateOIDCConfig(r.Runtime, true, "", "")
+			if createErr != nil {
+				return fmt.Errorf("failed to create OIDC config: %w", createErr)
+			}
+			scope.RosaRoleConfig.Status.OIDCID = createdID
+		}
+		oidcID = scope.RosaRoleConfig.Status.OIDCID
+	case expinfrav1.Unmanaged:
+		oidcID = scope.RosaRoleConfig.Spec.OperatorRoleConfig.OIDCID
+	}
+
+	// Check that the OIDC config exists.
+	oidcConfig, err := r.Runtime.OCMClient.GetOidcConfig(oidcID)
+	if err != nil || oidcConfig == nil {
+		return fmt.Errorf("failed to get OIDC config: %w", err)
+	}
+
+	scope.RosaRoleConfig.Status.OIDCID = oidcConfig.ID()
+
+	// Check the existing OIDC providers.
+	providers, err := r.Runtime.AWSClient.ListOidcProviders("", oidcConfig)
+	if err != nil {
+		return err
+	}
+
+	// Set the OIDC provider ARN if a matching provider already exists.
+	for _, provider := range providers {
+		if strings.Contains(provider.Arn, oidcID) {
+			scope.RosaRoleConfig.Status.OIDCProviderARN = provider.Arn
+			return nil
+		}
+	}
+
+	// Create the OIDC provider if it does not exist.
+	if scope.RosaRoleConfig.Status.OIDCProviderARN == "" {
+		if err := oidcprovider.CreateOIDCProvider(r.Runtime, oidcID, "", true); err != nil {
+			return err
+		}
+		providerArn, err := r.Runtime.AWSClient.GetOpenIDConnectProviderByOidcEndpointUrl(oidcConfig.IssuerUrl())
+		if err != nil {
+			return err
+		}
+		scope.RosaRoleConfig.Status.OIDCProviderARN = providerArn
+	}
+
+	return nil
+}
+
+func (r *ROSARoleConfigReconciler) reconcileAccountRoles(scope *scope.RosaRoleConfigScope) error {
+	accountRoles, err := r.Runtime.AWSClient.ListAccountRoles(scope.RosaRoleConfig.Spec.AccountRoleConfig.Version)
+	if err != nil {
+		// ListAccountRoles returns an error when no account roles exist;
return only for any other error.
+		if !strings.Contains(err.Error(), "no account roles found") {
+			return err
+		}
+	}
+
+	accountRolesRef := expinfrav1.AccountRolesRef{}
+	for _, role := range accountRoles {
+		if role.RoleName == fmt.Sprintf("%s%s", scope.RosaRoleConfig.Spec.AccountRoleConfig.Prefix, expinfrav1.HCPROSAInstallerRole) {
+			accountRolesRef.InstallerRoleARN = role.RoleARN
+		} else if role.RoleName == fmt.Sprintf("%s%s", scope.RosaRoleConfig.Spec.AccountRoleConfig.Prefix, expinfrav1.HCPROSASupportRole) {
+			accountRolesRef.SupportRoleARN = role.RoleARN
+		} else if role.RoleName == fmt.Sprintf("%s%s", scope.RosaRoleConfig.Spec.AccountRoleConfig.Prefix, expinfrav1.HCPROSAWorkerRole) {
+			accountRolesRef.WorkerRoleARN = role.RoleARN
+		}
+	}
+
+	// Set the account role refs if they are all present.
+	if r.accountRolesReady(accountRolesRef) {
+		scope.RosaRoleConfig.Status.AccountRolesRef = accountRolesRef
+		return nil
+	}
+
+	policies, err := r.Runtime.OCMClient.GetPolicies("AccountRole")
+	if err != nil {
+		return err
+	}
+
+	return accountroles.CreateHCPRoles(r.Runtime, scope.RosaRoleConfig.Spec.AccountRoleConfig.Prefix, true, scope.RosaRoleConfig.Spec.AccountRoleConfig.PermissionsBoundaryARN,
+		rosa.GetOCMClientEnv(r.Runtime.OCMClient), policies, scope.RosaRoleConfig.Spec.AccountRoleConfig.Version, scope.RosaRoleConfig.Spec.AccountRoleConfig.Path,
+		scope.RosaRoleConfig.Spec.AccountRoleConfig.SharedVPCConfig.IsSharedVPC(), scope.RosaRoleConfig.Spec.AccountRoleConfig.SharedVPCConfig.RouteRoleARN,
+		scope.RosaRoleConfig.Spec.AccountRoleConfig.SharedVPCConfig.VPCEndpointRoleARN)
+}
+
+func (r *ROSARoleConfigReconciler) deleteAccountRoles(scope *scope.RosaRoleConfigScope) error {
+	// List all account role names.
+	prefix := scope.RosaRoleConfig.Spec.AccountRoleConfig.Prefix
+	hasSharedVpcPolicies := scope.RosaRoleConfig.Spec.AccountRoleConfig.SharedVPCConfig.IsSharedVPC()
+	roleNames := []string{
+		fmt.Sprintf("%s%s", prefix, expinfrav1.HCPROSAInstallerRole),
+		fmt.Sprintf("%s%s", prefix, expinfrav1.HCPROSASupportRole),
+		fmt.Sprintf("%s%s", prefix, expinfrav1.HCPROSAWorkerRole),
+	}
+
+	var errs []error
+	for _, roleName := range roleNames {
+		if err := r.Runtime.AWSClient.DeleteAccountRole(roleName, prefix, true, hasSharedVpcPolicies); err != nil {
+			errs = append(errs, err)
+		}
+	}
+
+	return kerrors.NewAggregate(errs)
+}
+
+func (r *ROSARoleConfigReconciler) deleteOIDC(scope *scope.RosaRoleConfigScope) error {
+	// Delete only a managed OIDC config.
+	if scope.RosaRoleConfig.Spec.OidcProviderType == expinfrav1.Managed && scope.RosaRoleConfig.Status.OIDCID != "" {
+		oidcConfig, err := r.Runtime.OCMClient.GetOidcConfig(scope.RosaRoleConfig.Status.OIDCID)
+		if err != nil {
+			return err
+		}
+
+		oidcEndpointURL := oidcConfig.IssuerUrl()
+		if usedOidcProvider, err := r.Runtime.OCMClient.HasAClusterUsingOidcProvider(oidcEndpointURL, r.Runtime.Creator.AccountID); err != nil {
+			return err
+		} else if usedOidcProvider {
+			return fmt.Errorf("OIDC provider '%s' is still used by clusters and cannot be deleted", oidcEndpointURL)
+		}
+
+		if err = r.Runtime.AWSClient.DeleteOpenIDConnectProvider(scope.RosaRoleConfig.Status.OIDCProviderARN); err != nil {
+			return err
+		}
+
+		return r.Runtime.OCMClient.DeleteOidcConfig(oidcConfig.ID())
+	}
+
+	return nil
+}
+
+func (r *ROSARoleConfigReconciler) deleteOperatorRoles(scope *scope.RosaRoleConfigScope) error {
+	prefix := scope.RosaRoleConfig.Spec.OperatorRoleConfig.Prefix
+	if usedOperatorRoles, err := r.Runtime.OCMClient.HasAClusterUsingOperatorRolesPrefix(prefix); err != nil {
+		return err
+	} else if usedOperatorRoles {
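+		// Refuse to delete operator roles that an existing cluster still references.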
+		return fmt.Errorf("operator roles with prefix '%s' are in use and cannot be deleted", prefix)
+	}
+
+	// List all operator role names.
+	roleNames := []string{
+		fmt.Sprintf("%s%s", prefix, expinfrav1.ControlPlaneOperatorARNSuffix),
+		fmt.Sprintf("%s%s", prefix, expinfrav1.ImageRegistryARNSuffix),
+		fmt.Sprintf("%s%s", prefix, expinfrav1.IngressOperatorARNSuffix),
+		fmt.Sprintf("%s%s", prefix, expinfrav1.KMSProviderARNSuffix),
+		fmt.Sprintf("%s%s", prefix, expinfrav1.KubeCloudControllerARNSuffix),
+		fmt.Sprintf("%s%s", prefix, expinfrav1.NetworkARNSuffix),
+		fmt.Sprintf("%s%s", prefix, expinfrav1.NodePoolManagementARNSuffix),
+		fmt.Sprintf("%s%s", prefix, expinfrav1.StorageARNSuffix),
+	}
+
+	allSharedVpcPoliciesNotDeleted := make(map[string]bool)
+	var errs []error
+	for _, roleName := range roleNames {
+		policiesNotDeleted, err := r.Runtime.AWSClient.DeleteOperatorRole(roleName, true, true)
+		if err != nil && (!strings.Contains(err.Error(), "does not exists") && !strings.Contains(err.Error(), "NoSuchEntity")) {
+			errs = append(errs, err)
+		}
+
+		maps.Copy(allSharedVpcPoliciesNotDeleted, policiesNotDeleted)
+	}
+
+	for policyOutput, notDeleted := range allSharedVpcPoliciesNotDeleted {
+		if notDeleted {
+			scope.Logger.Info("unable to delete policy: still attached to other resources", "policy", policyOutput)
+		}
+	}
+
+	return kerrors.NewAggregate(errs)
+}
+
+func (r ROSARoleConfigReconciler) rosaRolesConfigReady(rosaRoleConfig *expinfrav1.ROSARoleConfig) bool {
+	return rosaRoleConfig.Status.OIDCID != "" &&
+		r.operatorRolesReady(rosaRoleConfig.Status.OperatorRolesRef) &&
+		r.accountRolesReady(rosaRoleConfig.Status.AccountRolesRef)
+}
+
+func (r ROSARoleConfigReconciler) accountRolesReady(accountRolesRef expinfrav1.AccountRolesRef) bool {
+	return accountRolesRef.InstallerRoleARN != "" &&
+		accountRolesRef.SupportRoleARN != "" &&
+		accountRolesRef.WorkerRoleARN != ""
+}
+
+func (r ROSARoleConfigReconciler) operatorRolesReady(operatorRolesRef v1beta2.AWSRolesRef) bool {
+	return operatorRolesRef.ControlPlaneOperatorARN != "" &&
+		operatorRolesRef.ImageRegistryARN != "" &&
+		operatorRolesRef.IngressARN != "" &&
+		operatorRolesRef.KMSProviderARN != "" &&
+		operatorRolesRef.KubeCloudControllerARN != "" &&
+		operatorRolesRef.NetworkARN != "" &&
+		operatorRolesRef.NodePoolManagementARN != "" &&
+		operatorRolesRef.StorageARN != ""
+}
+
+// setUpRuntime sets up the ROSA runtime if it doesn't exist.
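+// The runtime mirrors what the rosa CLI builds at startup: an OCM client, an
+// AWS client, a reporter, and a logger. Tests may pre-populate r.Runtime with
+// mocks, in which case this is a no-op.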
+func (r *ROSARoleConfigReconciler) setUpRuntime(ctx context.Context, scope *scope.RosaRoleConfigScope) error {
+	if r.Runtime != nil {
+		return nil
+	}
+
+	// Create OCM client
+	ocm, err := r.NewOCMClient(ctx, scope)
+	if err != nil {
+		return fmt.Errorf("failed to create OCM client: %w", err)
+	}
+
+	ocmClient, err := rosa.ConvertToRosaOcmClient(ocm)
+	if err != nil || ocmClient == nil {
+		return fmt.Errorf("failed to convert OCM client: %w", err)
+	}
+
+	r.Runtime = rosacli.NewRuntime()
+	r.Runtime.OCMClient = ocmClient
+	r.Runtime.Reporter = reporter.CreateReporter()
+	r.Runtime.Logger = rosalogging.NewLogger()
+
+	r.Runtime.AWSClient, err = aws.NewClient().Logger(r.Runtime.Logger).Build()
+	if err != nil {
+		return fmt.Errorf("failed to create aws client: %w", err)
+	}
+
+	r.Runtime.Creator, err = r.Runtime.AWSClient.GetCreator()
+	if err != nil {
+		return fmt.Errorf("failed to get creator: %w", err)
+	}
+
+	return nil
+}
diff --git a/exp/controllers/rosaroleconfig_controller_test.go b/exp/controllers/rosaroleconfig_controller_test.go
new file mode 100644
index 0000000000..96fbcc8c28
--- /dev/null
+++ b/exp/controllers/rosaroleconfig_controller_test.go
@@ -0,0 +1,901 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package controllers
+
+import (
+	"context"
+	"fmt"
+	"math/rand"
+	"net/http"
+	"strings"
+	"testing"
+	"time"
+
+	awsSdk "github.com/aws/aws-sdk-go-v2/aws"
+	iamv2 "github.com/aws/aws-sdk-go-v2/service/iam"
+	iamTypes "github.com/aws/aws-sdk-go-v2/service/iam/types"
+	stsv2 "github.com/aws/aws-sdk-go-v2/service/sts"
+	. "github.com/onsi/gomega"
+	sdk "github.com/openshift-online/ocm-sdk-go"
+	ocmlogging "github.com/openshift-online/ocm-sdk-go/logging"
+	ocmsdk "github.com/openshift-online/ocm-sdk-go/testing"
+	"github.com/openshift/rosa/pkg/aws"
+	rosaMocks "github.com/openshift/rosa/pkg/aws/mocks"
+	"github.com/openshift/rosa/pkg/ocm"
+	rosacli "github.com/openshift/rosa/pkg/rosa"
+	"github.com/sirupsen/logrus"
+	"go.uber.org/mock/gomock"
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/types"
+	ctrl "sigs.k8s.io/controller-runtime"
+
+	rosacontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/rosa/api/v1beta2"
+	expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2"
+	v1beta1conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions"
+)
+
+// generateTestID creates a unique identifier for test resources.
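+// Combining the nanosecond timestamp with a random suffix keeps namespaces
+// and object names unique across repeated runs against the shared test env.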
+func generateTestID() string { + return fmt.Sprintf("%d-%d", time.Now().UnixNano(), rand.Intn(10000)) +} + +func TestROSARoleConfigReconcileCreate(t *testing.T) { + RegisterTestingT(t) + g := NewWithT(t) + + // Generate unique test ID for resource isolation + testID := generateTestID() + + ssoServer := ocmsdk.MakeTCPServer() + apiServer := ocmsdk.MakeTCPServer() + defer ssoServer.Close() + defer apiServer.Close() + apiServer.SetAllowUnhandledRequests(true) + apiServer.SetUnhandledRequestStatusCode(http.StatusInternalServerError) + ctx := context.TODO() + + // Create the token: + accessToken := ocmsdk.MakeTokenString("Bearer", 15*time.Minute) + + // Prepare the server: + ssoServer.AppendHandlers( + ocmsdk.RespondWithAccessToken(accessToken), + ) + logger, err := ocmlogging.NewGoLoggerBuilder(). + Debug(false). + Build() + Expect(err).ToNot(HaveOccurred()) + // Set up the connection with the fake config + connection, err := sdk.NewConnectionBuilder(). + Logger(logger). + Tokens(accessToken). + URL(apiServer.URL()). + Build() + // Initialize client object + Expect(err).To(BeNil()) + ocmClient := ocm.NewClientWithConnection(connection) + + mockCtrl := gomock.NewController(t) + defer mockCtrl.Finish() + // mock iam client to expect ListRoles call + mockIamClient := rosaMocks.NewMockIamApiClient(mockCtrl) + mockIamClient.EXPECT().ListRoles(gomock.Any(), gomock.Any()).Return(&iamv2.ListRolesOutput{ + Roles: []iamTypes.Role{}, + }, nil).AnyTimes() + + mockIamClient.EXPECT().ListOpenIDConnectProviders(gomock.Any(), gomock.Any()).Return(&iamv2.ListOpenIDConnectProvidersOutput{ + OpenIDConnectProviderList: []iamTypes.OpenIDConnectProviderListEntry{}, + }, nil).AnyTimes() + + // Mock GetRole calls - return role not found error to trigger role creation + mockIamClient.EXPECT().GetRole(gomock.Any(), gomock.Any()).Return(nil, &iamTypes.NoSuchEntityException{ + Message: awsSdk.String("The role with name test-role does not exist."), + }).AnyTimes() + + // Mock CreateRole calls for role creation + mockIamClient.EXPECT().CreateRole(gomock.Any(), gomock.Any()).Return(&iamv2.CreateRoleOutput{ + Role: &iamTypes.Role{ + RoleName: awsSdk.String("test-role"), + Arn: awsSdk.String("arn:aws:iam::123456789012:role/test-role"), + }, + }, nil).AnyTimes() + + providerARN := "test-oidc-id-created" + mockIamClient.EXPECT().CreateOpenIDConnectProvider(gomock.Any(), gomock.Any(), gomock.Any()).Return( + &iamv2.CreateOpenIDConnectProviderOutput{OpenIDConnectProviderArn: &providerARN}, nil).AnyTimes() + + // Mock AttachRolePolicy calls + mockIamClient.EXPECT().AttachRolePolicy(gomock.Any(), gomock.Any()).Return(&iamv2.AttachRolePolicyOutput{}, nil).AnyTimes() + + // Mock CreatePolicy calls + mockIamClient.EXPECT().CreatePolicy(gomock.Any(), gomock.Any()).Return(&iamv2.CreatePolicyOutput{ + Policy: &iamTypes.Policy{ + PolicyName: awsSdk.String("test-policy"), + Arn: awsSdk.String("arn:aws:iam::123456789012:policy/test-policy"), + }, + }, nil).AnyTimes() + + // Mock GetPolicy calls - return success for AWS managed policies, not found for others + mockIamClient.EXPECT().GetPolicy(gomock.Any(), gomock.Any()).DoAndReturn(func(ctx context.Context, input *iamv2.GetPolicyInput) (*iamv2.GetPolicyOutput, error) { + switch *input.PolicyArn { + case "arn:aws:iam::aws:policy/sts_hcp_installer_permission_policy": + return &iamv2.GetPolicyOutput{ + Policy: &iamTypes.Policy{ + PolicyName: awsSdk.String("sts_hcp_installer_permission_policy"), + Arn: awsSdk.String("arn:aws:iam::aws:policy/sts_hcp_installer_permission_policy"), + }, + }, nil + 
case "arn:aws:iam::aws:policy/sts_hcp_support_permission_policy": + return &iamv2.GetPolicyOutput{ + Policy: &iamTypes.Policy{ + PolicyName: awsSdk.String("sts_hcp_support_permission_policy"), + Arn: awsSdk.String("arn:aws:iam::aws:policy/sts_hcp_support_permission_policy"), + }, + }, nil + case "arn:aws:iam::aws:policy/sts_hcp_worker_permission_policy": + return &iamv2.GetPolicyOutput{ + Policy: &iamTypes.Policy{ + PolicyName: awsSdk.String("sts_hcp_worker_permission_policy"), + Arn: awsSdk.String("arn:aws:iam::aws:policy/sts_hcp_worker_permission_policy"), + }, + }, nil + default: + return nil, &iamTypes.NoSuchEntityException{ + Message: awsSdk.String("The policy does not exist."), + } + } + }).AnyTimes() + + // Mock ListPolicies calls - return expected ROSA managed policies + mockIamClient.EXPECT().ListPolicies(gomock.Any(), gomock.Any()).Return(&iamv2.ListPoliciesOutput{ + Policies: []iamTypes.Policy{ + { + PolicyName: awsSdk.String("sts_hcp_installer_permission_policy"), + Arn: awsSdk.String("arn:aws:iam::aws:policy/sts_hcp_installer_permission_policy"), + }, + { + PolicyName: awsSdk.String("sts_hcp_support_permission_policy"), + Arn: awsSdk.String("arn:aws:iam::aws:policy/sts_hcp_support_permission_policy"), + }, + { + PolicyName: awsSdk.String("sts_hcp_worker_permission_policy"), + Arn: awsSdk.String("arn:aws:iam::aws:policy/sts_hcp_worker_permission_policy"), + }, + }, + }, nil).AnyTimes() + + // mock sts - add common STS calls that might be needed during role creation + mockSTSClient := rosaMocks.NewMockStsApiClient(mockCtrl) + mockSTSClient.EXPECT().GetCallerIdentity(gomock.Any(), gomock.Any()).Return(&stsv2.GetCallerIdentityOutput{ + Arn: awsSdk.String("fake"), + Account: awsSdk.String("123"), + UserId: awsSdk.String("test-user-id"), + }, nil).AnyTimes() + + awsClient := aws.New( + awsSdk.Config{}, + aws.NewLoggerWrapper(logrus.New(), nil), + mockIamClient, + rosaMocks.NewMockEc2ApiClient(mockCtrl), + rosaMocks.NewMockOrganizationsApiClient(mockCtrl), + rosaMocks.NewMockS3ApiClient(mockCtrl), + rosaMocks.NewMockSecretsManagerApiClient(mockCtrl), + mockSTSClient, + rosaMocks.NewMockCloudFormationApiClient(mockCtrl), + rosaMocks.NewMockServiceQuotasApiClient(mockCtrl), + rosaMocks.NewMockServiceQuotasApiClient(mockCtrl), + &aws.AccessKey{}, + false, + ) + + r := rosacli.NewRuntime() + r.OCMClient = ocmClient + r.AWSClient = awsClient + r.Creator = &aws.Creator{ + ARN: "fake", + AccountID: "123", + IsSTS: false, + } + // Mock OCM API calls using path-based routing + apiServer.RouteToHandler("GET", "/api/clusters_mgmt/v1/aws_inquiries/sts_policies", + func(w http.ResponseWriter, r *http.Request) { + query := r.URL.Query().Get("search") + if strings.Contains(query, "AccountRole") { + // Return AccountRole policies + ocmsdk.RespondWithJSON(http.StatusOK, `{ + "items": [ + { + "id": "sts_hcp_installer_permission_policy", + "arn": "arn:aws:iam::aws:policy/sts_hcp_installer_permission_policy", + "type": "AccountRole" + }, + { + "id": "sts_hcp_support_permission_policy", + "arn": "arn:aws:iam::aws:policy/sts_hcp_support_permission_policy", + "type": "AccountRole" + }, + { + "id": "sts_hcp_worker_permission_policy", + "arn": "arn:aws:iam::aws:policy/sts_hcp_worker_permission_policy", + "type": "AccountRole" + }, + { + "id": "sts_hcp_instance_worker_permission_policy", + "arn": "arn:aws:iam::aws:policy/sts_hcp_instance_worker_permission_policy", + "type": "AccountRole" + } + ] + }`)(w, r) + } else if strings.Contains(query, "OperatorRole") { + // Return OperatorRole policies + 
ocmsdk.RespondWithJSON(http.StatusOK, `{
+					"items": [
+						{
+							"id": "openshift_hcp_ingress_policy",
+							"arn": "arn:aws:iam::aws:policy/openshift_hcp_ingress_policy",
+							"type": "OperatorRole"
+						},
+						{
+							"id": "openshift_hcp_image_registry_policy",
+							"arn": "arn:aws:iam::aws:policy/openshift_hcp_image_registry_policy",
+							"type": "OperatorRole"
+						},
+						{
+							"id": "openshift_hcp_storage_policy",
+							"arn": "arn:aws:iam::aws:policy/openshift_hcp_storage_policy",
+							"type": "OperatorRole"
+						},
+						{
+							"id": "openshift_hcp_network_policy",
+							"arn": "arn:aws:iam::aws:policy/openshift_hcp_network_policy",
+							"type": "OperatorRole"
+						},
+						{
+							"id": "openshift_hcp_kube_controller_policy",
+							"arn": "arn:aws:iam::aws:policy/openshift_hcp_kube_controller_policy",
+							"type": "OperatorRole"
+						},
+						{
+							"id": "openshift_hcp_node_pool_policy",
+							"arn": "arn:aws:iam::aws:policy/openshift_hcp_node_pool_policy",
+							"type": "OperatorRole"
+						},
+						{
+							"id": "openshift_hcp_control_plane_policy",
+							"arn": "arn:aws:iam::aws:policy/openshift_hcp_control_plane_policy",
+							"type": "OperatorRole"
+						},
+						{
+							"id": "openshift_hcp_kms_policy",
+							"arn": "arn:aws:iam::aws:policy/openshift_hcp_kms_policy",
+							"type": "OperatorRole"
+						}
+					]
+				}`)(w, r)
+			} else {
+				// Default response for other queries
+				ocmsdk.RespondWithJSON(http.StatusOK, `{"items": []}`)(w, r)
+			}
+		})
+
+	// Mock OCM API calls - the first call gets this response
+	apiServer.AppendHandlers(
+		ocmsdk.RespondWithJSON(
+			http.StatusOK, "",
+		),
+	)
+	// Mock GetOidcConfig call
+	apiServer.AppendHandlers(
+		ocmsdk.RespondWithJSON(
+			http.StatusOK, `{"id": "test-oidc-id", "issuer_url": "https://test.oidc.url"}`,
+		),
+	)
+	// Mock OIDC config creation calls - POST /api/clusters_mgmt/v1/oidc_configs
+	apiServer.RouteToHandler("POST", "/api/clusters_mgmt/v1/oidc_configs",
+		ocmsdk.RespondWithJSON(
+			http.StatusCreated, `{"id": "test-oidc-id-created", "issuer_url": "https://test.oidc.url"}`,
+		),
+	)
+	// Additional OIDC config call mock for GET requests
+	apiServer.RouteToHandler("GET", "/api/clusters_mgmt/v1/oidc_configs/test-oidc-id-created",
+		ocmsdk.RespondWithJSON(
+			http.StatusOK, `{"id": "test-oidc-id-created", "issuer_url": "https://test.oidc.url"}`,
+		),
+	)
+
+	// Mock GetAllCredRequests call
+	apiServer.AppendHandlers(
+		ocmsdk.RespondWithJSON(
+			http.StatusOK, `[]`,
+		),
+	)
+	// Mock HasAClusterUsingOperatorRolesPrefix call
+	apiServer.AppendHandlers(
+		ocmsdk.RespondWithJSON(
+			http.StatusOK, `false`,
+		),
+	)
+	// GET /api/clusters_mgmt/v1/products/rosa/technology_previews/hcp-zero-egress
+	apiServer.AppendHandlers(
+		ocmsdk.RespondWithJSON(
+			http.StatusInternalServerError, "",
+		),
+	)
+
+	// Create CRs with unique names to avoid conflicts
+	ns, err := testEnv.CreateNamespace(ctx, fmt.Sprintf("test-namespace-%s", testID))
+	g.Expect(err).ToNot(HaveOccurred())
+
+	rosaRoleConfig := &expinfrav1.ROSARoleConfig{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:       fmt.Sprintf("test-rosa-role-%s", testID),
+			Namespace:  ns.Name,
+			Finalizers: []string{expinfrav1.RosaRoleConfigFinalizer},
+		},
+		Spec: expinfrav1.ROSARoleConfigSpec{
+			AccountRoleConfig: expinfrav1.AccountRoleConfig{
+				Prefix:  "test",
+				Version: "4.15.0",
+			},
+			OperatorRoleConfig: expinfrav1.OperatorRoleConfig{
+				Prefix: "test",
+			},
+			OidcProviderType: expinfrav1.Managed,
+		},
+	}
+
+	createObject(g, rosaRoleConfig, ns.Name)
+	defer cleanupObject(g, rosaRoleConfig)
+
+	// Setup the reconciler with these mocks
+	reconciler := &ROSARoleConfigReconciler{
+		Client:  testEnv.Client,
+		Runtime: r,
+	}
+
+	// Call the Reconcile function
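+	// The finalizer is pre-set on the object, so AddFinalizer is a no-op and
+	// the first reconcile proceeds straight into role/OIDC creation.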
+	req := ctrl.Request{}
+	req.NamespacedName = types.NamespacedName{Name: rosaRoleConfig.Name, Namespace: rosaRoleConfig.Namespace}
+	_, errReconcile := reconciler.Reconcile(ctx, req)
+
+	// Assertions - reconcile should succeed even though only part of the config has been created so far
+	g.Expect(errReconcile).ToNot(HaveOccurred())
+
+	// Sleep to ensure the status is updated
+	time.Sleep(100 * time.Millisecond)
+
+	// Check the status of the ROSARoleConfig resource
+	updatedRoleConfig := &expinfrav1.ROSARoleConfig{}
+	err = reconciler.Client.Get(ctx, req.NamespacedName, updatedRoleConfig)
+	g.Expect(err).ToNot(HaveOccurred())
+
+	// We expect only the OIDC ID to be set after the first reconcile; account roles and operator roles should be empty
+	g.Expect(updatedRoleConfig.Status.OIDCID).To(Equal("test-oidc-id-created"))
+	g.Expect(updatedRoleConfig.Status.AccountRolesRef).To(Equal(expinfrav1.AccountRolesRef{}))
+	g.Expect(updatedRoleConfig.Status.OperatorRolesRef).To(Equal(rosacontrolplanev1.AWSRolesRef{}))
+
+	// Ready condition should be false.
+	for _, condition := range updatedRoleConfig.Status.Conditions {
+		if condition.Type == expinfrav1.RosaRoleConfigReadyCondition {
+			g.Expect(condition.Status).To(Equal(corev1.ConditionFalse))
+			break
+		}
+	}
+}
+
+func TestROSARoleConfigReconcileExist(t *testing.T) {
+	RegisterTestingT(t)
+	g := NewWithT(t)
+
+	// Generate unique test ID for resource isolation
+	testID := generateTestID()
+
+	ssoServer := ocmsdk.MakeTCPServer()
+	apiServer := ocmsdk.MakeTCPServer()
+	defer ssoServer.Close()
+	defer apiServer.Close()
+	apiServer.SetAllowUnhandledRequests(true)
+	apiServer.SetUnhandledRequestStatusCode(http.StatusInternalServerError)
+	ctx := context.TODO()
+
+	// Create the token:
+	accessToken := ocmsdk.MakeTokenString("Bearer", 15*time.Minute)
+
+	// Prepare the server:
+	ssoServer.AppendHandlers(
+		ocmsdk.RespondWithAccessToken(accessToken),
+	)
+	logger, err := ocmlogging.NewGoLoggerBuilder().
+		Debug(false).
+		Build()
+	Expect(err).ToNot(HaveOccurred())
+	// Set up the connection with the fake config
+	connection, err := sdk.NewConnectionBuilder().
+		Logger(logger).
+		Tokens(accessToken).
+		URL(apiServer.URL()).
+		Build()
+	// Initialize client object
+	Expect(err).To(BeNil())
+	ocmClient := ocm.NewClientWithConnection(connection)
+
+	mockCtrl := gomock.NewController(t)
+	defer mockCtrl.Finish()
+	// Mock the AWS client - return existing account roles and operator roles
+
+	mockAWSClient := aws.NewMockClient(mockCtrl)
+	mockAWSClient.EXPECT().HasManagedPolicies(gomock.Any()).Return(false, nil).AnyTimes()
+	mockAWSClient.EXPECT().HasHostedCPPolicies(gomock.Any()).Return(true, nil).AnyTimes()
+
+	// Return existing account roles
+	mockAWSClient.EXPECT().ListAccountRoles(gomock.Any()).Return([]aws.Role{
+		{
+			RoleName: "test-HCP-ROSA-Installer-Role",
+			RoleARN:  "arn:aws:iam::123456789012:role/test-HCP-ROSA-Installer-Role",
+		},
+		{
+			RoleName: "test-HCP-ROSA-Support-Role",
+			RoleARN:  "arn:aws:iam::123456789012:role/test-HCP-ROSA-Support-Role",
+		},
+		{
+			RoleName: "test-HCP-ROSA-Worker-Role",
+			RoleARN:  "arn:aws:iam::123456789012:role/test-HCP-ROSA-Worker-Role",
+		},
+	}, nil).AnyTimes()
+
+	// Return existing operator roles
+	mockAWSClient.EXPECT().ListOperatorRoles(gomock.Any(), gomock.Any(), gomock.Any()).Return(map[string][]aws.OperatorRoleDetail{
+		"test": {
+			{
+				RoleName: "test-openshift-ingress-operator-cloud-credentials",
+				RoleARN:  "arn:aws:iam::123456789012:role/test-openshift-ingress-operator-cloud-credentials",
+			},
+			{
+				RoleName: "test-openshift-image-registry-installer-cloud-credentials",
+				RoleARN:  "arn:aws:iam::123456789012:role/test-openshift-image-registry-installer-cloud-credentials",
+			},
+			{
+				RoleName: "test-openshift-cluster-csi-drivers-ebs-cloud-credentials",
+				RoleARN:  "arn:aws:iam::123456789012:role/test-openshift-cluster-csi-drivers-ebs-cloud-credentials",
+			},
+			{
+				RoleName: "test-openshift-cloud-network-config-controller-cloud-credentials",
+				RoleARN:  "arn:aws:iam::123456789012:role/test-openshift-cloud-network-config-controller-cloud-credentials",
+			},
+			{
+				RoleName: "test-kube-system-kube-controller-manager",
+				RoleARN:  "arn:aws:iam::123456789012:role/test-kube-system-kube-controller-manager",
+			},
+			{
+				RoleName: "test-kube-system-capa-controller-manager",
+				RoleARN:  "arn:aws:iam::123456789012:role/test-kube-system-capa-controller-manager",
+			},
+			{
+				RoleName: "test-kube-system-control-plane-operator",
+				RoleARN:  "arn:aws:iam::123456789012:role/test-kube-system-control-plane-operator",
+			},
+			{
+				RoleName: "test-kube-system-kms-provider",
+				RoleARN:  "arn:aws:iam::123456789012:role/test-kube-system-kms-provider",
+			},
+		},
+	}, nil).AnyTimes()
+	// Return existing OIDC providers
+	mockAWSClient.EXPECT().ListOidcProviders(gomock.Any(), gomock.Any()).Return([]aws.OidcProviderOutput{
+		{
+			Arn: "arn:aws:iam::123456789012:oidc-provider/test-existing-oidc-id",
+		},
+	}, nil).AnyTimes()
+
+	mockAWSClient.EXPECT().GetCreator().Return(&aws.Creator{
+		ARN:       "arn:aws:iam::123456789012:user/test-user",
+		AccountID: "123456789012",
+		IsSTS:     false,
+	}, nil).AnyTimes()
+
+	awsClient := mockAWSClient
+
+	r := rosacli.NewRuntime()
+	r.OCMClient = ocmClient
+	r.AWSClient = awsClient
+	r.Creator = &aws.Creator{
+		ARN:       "arn:aws:iam::123456789012:user/test-user",
+		AccountID: "123456789012",
+		IsSTS:     false,
+	}
+
+	// Mock OCM API calls - the first call gets this response
+	apiServer.AppendHandlers(
+		ocmsdk.RespondWithJSON(
+			http.StatusOK, "",
+		),
+	)
+	// Mock GetOidcConfig call - return existing OIDC config
+	apiServer.AppendHandlers(
+		ocmsdk.RespondWithJSON(
+			http.StatusOK, `{"id": "test-existing-oidc-id", "issuer_url": "https://test.existing.oidc.url"}`,
+		),
+ ) + // Mock GetAllClusters call + apiServer.AppendHandlers( + ocmsdk.RespondWithJSON( + http.StatusOK, `{"items": []}`, + ), + ) + // Mock GetAllCredRequests call + apiServer.AppendHandlers( + ocmsdk.RespondWithJSON( + http.StatusOK, `{}`, + ), + ) + // Mock HasAClusterUsingOperatorRolesPrefix call + apiServer.AppendHandlers( + ocmsdk.RespondWithJSON( + http.StatusOK, `false`, + ), + ) + + // Mock existing OIDC config GET request + apiServer.RouteToHandler("GET", "/api/clusters_mgmt/v1/oidc_configs/test-existing-oidc-id", + ocmsdk.RespondWithJSON( + http.StatusOK, `{"id": "test-existing-oidc-id", "issuer_url": "https://test.existing.oidc.url"}`, + ), + ) + + // Create CRs with unique names to avoid conflicts + ns, err := testEnv.CreateNamespace(ctx, fmt.Sprintf("test-namespace-all-existing-%s", testID)) + g.Expect(err).ToNot(HaveOccurred()) + + rosaRoleConfig := &expinfrav1.ROSARoleConfig{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("test-rosa-role-all-existing-%s", testID), + Namespace: ns.Name, + Finalizers: []string{expinfrav1.RosaRoleConfigFinalizer}, + }, + Spec: expinfrav1.ROSARoleConfigSpec{ + AccountRoleConfig: expinfrav1.AccountRoleConfig{ + Prefix: "test", + Version: "4.15.0", + }, + OperatorRoleConfig: expinfrav1.OperatorRoleConfig{ + Prefix: "test", + }, + OidcProviderType: expinfrav1.Managed, + }, + Status: expinfrav1.ROSARoleConfigStatus{ + OIDCID: "test-existing-oidc-id", + }, + } + + createObject(g, rosaRoleConfig, ns.Name) + defer cleanupObject(g, rosaRoleConfig) + + // Setup the reconciler with these mocks + reconciler := &ROSARoleConfigReconciler{ + Client: testEnv.Client, + Runtime: r, + } + + // Call the Reconcile function + req := ctrl.Request{} + req.NamespacedName = types.NamespacedName{Name: rosaRoleConfig.Name, Namespace: rosaRoleConfig.Namespace} + _, errReconcile := reconciler.Reconcile(ctx, req) + + // Assertions - since all resources exist, reconciliation should succeed + g.Expect(errReconcile).ToNot(HaveOccurred()) + + var updatedRoleConfig *expinfrav1.ROSARoleConfig + + g.Eventually(func(g Gomega) { + // Check the status of the ROSARoleConfig resource + updatedRoleConfig = &expinfrav1.ROSARoleConfig{} + g.Expect(reconciler.Client.Get(ctx, req.NamespacedName, updatedRoleConfig)).ToNot(HaveOccurred()) + + // Verify that all existing account roles are preserved + g.Expect(updatedRoleConfig.Status.AccountRolesRef.InstallerRoleARN).To(Equal("arn:aws:iam::123456789012:role/test-HCP-ROSA-Installer-Role")) + g.Expect(updatedRoleConfig.Status.AccountRolesRef.SupportRoleARN).To(Equal("arn:aws:iam::123456789012:role/test-HCP-ROSA-Support-Role")) + g.Expect(updatedRoleConfig.Status.AccountRolesRef.WorkerRoleARN).To(Equal("arn:aws:iam::123456789012:role/test-HCP-ROSA-Worker-Role")) + + // Verify OIDC config is preserved + g.Expect(updatedRoleConfig.Status.OIDCID).To(Equal("test-existing-oidc-id")) + g.Expect(updatedRoleConfig.Status.OIDCProviderARN).To(Equal("arn:aws:iam::123456789012:oidc-provider/test-existing-oidc-id")) + + // Verify operator roles are populated with existing roles + g.Expect(updatedRoleConfig.Status.OperatorRolesRef.IngressARN).To(Equal("arn:aws:iam::123456789012:role/test-openshift-ingress-operator-cloud-credentials")) + g.Expect(updatedRoleConfig.Status.OperatorRolesRef.ImageRegistryARN).To(Equal("arn:aws:iam::123456789012:role/test-openshift-image-registry-installer-cloud-credentials")) + 
g.Expect(updatedRoleConfig.Status.OperatorRolesRef.StorageARN).To(Equal("arn:aws:iam::123456789012:role/test-openshift-cluster-csi-drivers-ebs-cloud-credentials")) + g.Expect(updatedRoleConfig.Status.OperatorRolesRef.NetworkARN).To(Equal("arn:aws:iam::123456789012:role/test-openshift-cloud-network-config-controller-cloud-credentials")) + g.Expect(updatedRoleConfig.Status.OperatorRolesRef.KubeCloudControllerARN).To(Equal("arn:aws:iam::123456789012:role/test-kube-system-kube-controller-manager")) + g.Expect(updatedRoleConfig.Status.OperatorRolesRef.NodePoolManagementARN).To(Equal("arn:aws:iam::123456789012:role/test-kube-system-capa-controller-manager")) + g.Expect(updatedRoleConfig.Status.OperatorRolesRef.ControlPlaneOperatorARN).To(Equal("arn:aws:iam::123456789012:role/test-kube-system-control-plane-operator")) + g.Expect(updatedRoleConfig.Status.OperatorRolesRef.KMSProviderARN).To(Equal("arn:aws:iam::123456789012:role/test-kube-system-kms-provider")) + }).WithTimeout(30 * time.Second).Should(Succeed()) + + // Should have a condition indicating success - expect Ready condition to be True + g.Eventually(func(g Gomega) { + readyCondition := v1beta1conditions.Get(updatedRoleConfig, expinfrav1.RosaRoleConfigReadyCondition) + g.Expect(readyCondition).ToNot(BeNil()) + g.Expect(readyCondition.Status).To(Equal(corev1.ConditionTrue)) + g.Expect(readyCondition.Reason).To(Equal(expinfrav1.RosaRoleConfigCreatedReason)) + }).Should(Succeed()) +} + +func TestROSARoleConfigReconcileDelete(t *testing.T) { + RegisterTestingT(t) + g := NewWithT(t) + + // Generate unique test ID for resource isolation + testID := generateTestID() + + ssoServer := ocmsdk.MakeTCPServer() + apiServer := ocmsdk.MakeTCPServer() + defer ssoServer.Close() + defer apiServer.Close() + apiServer.SetAllowUnhandledRequests(true) + apiServer.SetUnhandledRequestStatusCode(http.StatusInternalServerError) + ctx := context.TODO() + + // Create the token: + accessToken := ocmsdk.MakeTokenString("Bearer", 15*time.Minute) + + // Prepare the server: + ssoServer.AppendHandlers( + ocmsdk.RespondWithAccessToken(accessToken), + ) + logger, err := ocmlogging.NewGoLoggerBuilder(). + Debug(false). + Build() + Expect(err).ToNot(HaveOccurred()) + // Set up the connection with the fake config + connection, err := sdk.NewConnectionBuilder(). + Logger(logger). + Tokens(accessToken). + URL(apiServer.URL()). 
+ Build() + // Initialize client object + Expect(err).To(BeNil()) + ocmClient := ocm.NewClientWithConnection(connection) + + mockCtrl := gomock.NewController(t) + defer mockCtrl.Finish() + mockAWSClient := aws.NewMockClient(mockCtrl) + mockAWSClient.EXPECT().HasManagedPolicies(gomock.Any()).Return(false, nil).AnyTimes() + mockAWSClient.EXPECT().HasHostedCPPolicies(gomock.Any()).Return(true, nil).AnyTimes() + mockAWSClient.EXPECT().GetOperatorRolesFromAccountByPrefix(gomock.Any(), gomock.Any()).Return([]string{ + "test-openshift-ingress-operator-cloud-credentials", + "test-openshift-image-registry-installer-cloud-credentials", + "test-openshift-cluster-csi-drivers-ebs-cloud-credentials", + "test-openshift-cloud-network-config-controller-cloud-credentials", + "test-kube-system-kube-controller-manager", + "test-kube-system-capa-controller-manager", + "test-kube-system-control-plane-operator", + "test-kube-system-kms-provider", + }, nil).AnyTimes() + + // Return existing account roles that will be deleted + mockAWSClient.EXPECT().ListAccountRoles(gomock.Any()).Return([]aws.Role{ + { + RoleName: "test-HCP-ROSA-Installer-Role", + RoleARN: "arn:aws:iam::123456789012:role/test-HCP-ROSA-Installer-Role", + }, + { + RoleName: "test-HCP-ROSA-Support-Role", + RoleARN: "arn:aws:iam::123456789012:role/test-HCP-ROSA-Support-Role", + }, + { + RoleName: "test-HCP-ROSA-Worker-Role", + RoleARN: "arn:aws:iam::123456789012:role/test-HCP-ROSA-Worker-Role", + }, + }, nil).AnyTimes() + + mockAWSClient.EXPECT().ListOperatorRoles(gomock.Any(), gomock.Any(), gomock.Any()).Return(map[string][]aws.OperatorRoleDetail{ + "test": { + { + RoleName: "test-openshift-ingress-operator-cloud-credentials", + RoleARN: "arn:aws:iam::123456789012:role/test-openshift-ingress-operator-cloud-credentials", + }, + { + RoleName: "test-openshift-image-registry-installer-cloud-credentials", + RoleARN: "arn:aws:iam::123456789012:role/test-openshift-image-registry-installer-cloud-credentials", + }, + { + RoleName: "test-openshift-cluster-csi-drivers-ebs-cloud-credentials", + RoleARN: "arn:aws:iam::123456789012:role/test-openshift-cluster-csi-drivers-ebs-cloud-credentials", + }, + { + RoleName: "test-openshift-cloud-network-config-controller-cloud-credentials", + RoleARN: "arn:aws:iam::123456789012:role/test-openshift-cloud-network-config-controller-cloud-credentials", + }, + { + RoleName: "test-kube-system-kube-controller-manager", + RoleARN: "arn:aws:iam::123456789012:role/test-kube-system-kube-controller-manager", + }, + { + RoleName: "test-kube-system-capa-controller-manager", + RoleARN: "arn:aws:iam::123456789012:role/test-kube-system-capa-controller-manager", + }, + { + RoleName: "test-kube-system-control-plane-operator", + RoleARN: "arn:aws:iam::123456789012:role/test-kube-system-control-plane-operator", + }, + { + RoleName: "test-kube-system-kms-provider", + RoleARN: "arn:aws:iam::123456789012:role/test-kube-system-kms-provider", + }, + }, + }, nil).AnyTimes() + + // Return existing OIDC providers that will be deleted + mockAWSClient.EXPECT().ListOidcProviders(gomock.Any(), gomock.Any()).Return([]aws.OidcProviderOutput{ + { + Arn: "arn:aws:iam::123456789012:oidc-provider/test-existing-oidc-id", + }, + }, nil).AnyTimes() + + // Delete operator roles (called individually for each role) + mockAWSClient.EXPECT().DeleteOperatorRole(gomock.Any(), gomock.Any(), true).Return(map[string]bool{}, nil).AnyTimes() + + // Mock OIDC provider deletion + 
mockAWSClient.EXPECT().DeleteOpenIDConnectProvider("arn:aws:iam::123456789012:oidc-provider/test-existing-oidc-id").Return(nil).AnyTimes() + + // Delete account roles (called individually for each role) + mockAWSClient.EXPECT().DeleteAccountRole(gomock.Any(), gomock.Any(), true, false).Return(nil).AnyTimes() + + mockAWSClient.EXPECT().GetCreator().Return(&aws.Creator{ + ARN: "arn:aws:iam::123456789012:user/test-user", + AccountID: "123456789012", + IsSTS: false, + }, nil).AnyTimes() + + awsClient := mockAWSClient + + r := rosacli.NewRuntime() + r.OCMClient = ocmClient + r.AWSClient = awsClient + r.Creator = &aws.Creator{ + ARN: "arn:aws:iam::123456789012:user/test-user", + AccountID: "123456789012", + IsSTS: false, + } + + // Mock OCM API calls + apiServer.AppendHandlers( + ocmsdk.RespondWithJSON( + http.StatusOK, "", + ), + ) + // Mock GetOidcConfig call - return existing OIDC config + apiServer.AppendHandlers( + ocmsdk.RespondWithJSON( + http.StatusOK, `{"id": "test-existing-oidc-id", "issuer_url": "https://test.existing.oidc.url"}`, + ), + ) + // Mock GetAllClusters call + apiServer.AppendHandlers( + ocmsdk.RespondWithJSON( + http.StatusOK, `{"items": []}`, + ), + ) + // Mock GetAllCredRequests call + apiServer.AppendHandlers( + ocmsdk.RespondWithJSON( + http.StatusOK, `{}`, + ), + ) + // Mock HasAClusterUsingOperatorRolesPrefix call + apiServer.AppendHandlers( + ocmsdk.RespondWithJSON( + http.StatusOK, `false`, + ), + ) + + // Mock existing OIDC config GET request + apiServer.RouteToHandler("GET", "/api/clusters_mgmt/v1/oidc_configs/test-existing-oidc-id", + ocmsdk.RespondWithJSON( + http.StatusOK, `{"id": "test-existing-oidc-id", "issuer_url": "https://test.existing.oidc.url"}`, + ), + ) + + // Mock OIDC config deletion + apiServer.RouteToHandler("DELETE", "/api/clusters_mgmt/v1/oidc_configs/test-existing-oidc-id", + ocmsdk.RespondWithJSON( + http.StatusOK, `{}`, + ), + ) + + // Create CRs with unique names to avoid conflicts + ns, err := testEnv.CreateNamespace(ctx, fmt.Sprintf("test-namespace-delete-%s", testID)) + g.Expect(err).ToNot(HaveOccurred()) + + // Create ROSARoleConfig with populated status (simulating existing resources) + rosaRoleConfig := &expinfrav1.ROSARoleConfig{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("test-rosa-role-delete-%s", testID), + Namespace: ns.Name, + Finalizers: []string{expinfrav1.RosaRoleConfigFinalizer}, + // Set deletion timestamp to simulate deletion request + DeletionTimestamp: &metav1.Time{Time: time.Now()}, + }, + Spec: expinfrav1.ROSARoleConfigSpec{ + AccountRoleConfig: expinfrav1.AccountRoleConfig{ + Prefix: "test", + Version: "4.15.0", + }, + OperatorRoleConfig: expinfrav1.OperatorRoleConfig{ + Prefix: "test", + }, + OidcProviderType: expinfrav1.Managed, + }, + Status: expinfrav1.ROSARoleConfigStatus{ + OIDCID: "test-existing-oidc-id", + OIDCProviderARN: "arn:aws:iam::123456789012:oidc-provider/test-existing-oidc-id", + AccountRolesRef: expinfrav1.AccountRolesRef{ + InstallerRoleARN: "arn:aws:iam::123456789012:role/test-HCP-ROSA-Installer-Role", + SupportRoleARN: "arn:aws:iam::123456789012:role/test-HCP-ROSA-Support-Role", + WorkerRoleARN: "arn:aws:iam::123456789012:role/test-HCP-ROSA-Worker-Role", + }, + OperatorRolesRef: rosacontrolplanev1.AWSRolesRef{ + IngressARN: "arn:aws:iam::123456789012:role/test-openshift-ingress-operator-cloud-credentials", + ImageRegistryARN: "arn:aws:iam::123456789012:role/test-openshift-image-registry-installer-cloud-credentials", + StorageARN: 
"arn:aws:iam::123456789012:role/test-openshift-cluster-csi-drivers-ebs-cloud-credentials", + NetworkARN: "arn:aws:iam::123456789012:role/test-openshift-cloud-network-config-controller-cloud-credentials", + KubeCloudControllerARN: "arn:aws:iam::123456789012:role/test-kube-system-kube-controller-manager", + NodePoolManagementARN: "arn:aws:iam::123456789012:role/test-kube-system-capa-controller-manager", + ControlPlaneOperatorARN: "arn:aws:iam::123456789012:role/test-kube-system-control-plane-operator", + KMSProviderARN: "arn:aws:iam::123456789012:role/test-kube-system-kms-provider", + }, + }, + } + + createObject(g, rosaRoleConfig, ns.Name) + defer cleanupObject(g, rosaRoleConfig) + + // Setup the reconciler with these mocks + reconciler := &ROSARoleConfigReconciler{ + Client: testEnv.Client, + Runtime: r, + } + + // Call the Reconcile function + req := ctrl.Request{} + req.NamespacedName = types.NamespacedName{Name: rosaRoleConfig.Name, Namespace: rosaRoleConfig.Namespace} + + err = reconciler.Client.Delete(ctx, rosaRoleConfig) + g.Expect(err).ToNot(HaveOccurred()) + + // Sleep to ensure the status is updated + time.Sleep(100 * time.Millisecond) + + _, errReconcile := reconciler.Reconcile(ctx, req) + + // Assertions - deletion should succeed + g.Expect(errReconcile).ToNot(HaveOccurred()) + + // Sleep to ensure the status is updated + time.Sleep(100 * time.Millisecond) + + deletedRoleConfig := &expinfrav1.ROSARoleConfig{} + + // Verify the resource has been deleted (finalizers removed) + err = reconciler.Client.Get(ctx, req.NamespacedName, deletedRoleConfig) + + // The object should either be not found (fully deleted) or have no finalizers + if err == nil { + // If object still exists, verify finalizers are removed + g.Expect(deletedRoleConfig.Finalizers).To(BeEmpty(), "Finalizers should be removed after successful deletion") + } +} diff --git a/exp/controllers/suite_test.go b/exp/controllers/suite_test.go index 9283f003e9..5d68de26b0 100644 --- a/exp/controllers/suite_test.go +++ b/exp/controllers/suite_test.go @@ -31,8 +31,7 @@ import ( rosacontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/rosa/api/v1beta2" expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/test/helpers" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - expclusterv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" ) // These tests use Ginkgo (BDD-style Go testing framework). 
Refer to @@ -53,7 +52,6 @@ func setup() { utilruntime.Must(infrav1.AddToScheme(scheme.Scheme)) utilruntime.Must(clusterv1.AddToScheme(scheme.Scheme)) utilruntime.Must(expinfrav1.AddToScheme(scheme.Scheme)) - utilruntime.Must(expclusterv1.AddToScheme(scheme.Scheme)) utilruntime.Must(corev1.AddToScheme(scheme.Scheme)) utilruntime.Must(rosacontrolplanev1.AddToScheme(scheme.Scheme)) testEnvConfig := helpers.NewTestEnvironmentConfiguration([]string{ @@ -86,6 +84,12 @@ func setup() { if err := (&expinfrav1.ROSAMachinePool{}).SetupWebhookWithManager(testEnv); err != nil { panic(fmt.Sprintf("Unable to setup ROSAMachinePool webhook: %v", err)) } + if err := (&expinfrav1.ROSARoleConfig{}).SetupWebhookWithManager(testEnv); err != nil { + panic(fmt.Sprintf("Unable to setup ROSARoleConfig webhook: %v", err)) + } + if err := (&expinfrav1.ROSANetwork{}).SetupWebhookWithManager(testEnv); err != nil { + panic(fmt.Sprintf("Unable to setup ROSANetwork webhook: %v", err)) + } if err := (&rosacontrolplanev1.ROSAControlPlane{}).SetupWebhookWithManager(testEnv); err != nil { panic(fmt.Sprintf("Unable to setup ROSAControlPlane webhook: %v", err)) } diff --git a/exp/instancestate/suite_test.go b/exp/instancestate/suite_test.go index 2e669f7bfd..e1e9ecbb77 100644 --- a/exp/instancestate/suite_test.go +++ b/exp/instancestate/suite_test.go @@ -30,8 +30,7 @@ import ( expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/instancestate/mock_sqsiface" "sigs.k8s.io/cluster-api-provider-aws/v2/test/helpers" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - expclusterv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" ) // These tests use Ginkgo (BDD-style Go testing framework). Refer to @@ -55,7 +54,6 @@ func setup() { utilruntime.Must(infrav1.AddToScheme(scheme.Scheme)) utilruntime.Must(clusterv1.AddToScheme(scheme.Scheme)) utilruntime.Must(expinfrav1.AddToScheme(scheme.Scheme)) - utilruntime.Must(expclusterv1.AddToScheme(scheme.Scheme)) testEnvConfig := helpers.NewTestEnvironmentConfiguration([]string{ path.Join("config", "crd", "bases"), }, diff --git a/exp/utils/rosa_helper.go b/exp/utils/rosa_helper.go index fc08747874..957968cdb3 100644 --- a/exp/utils/rosa_helper.go +++ b/exp/utils/rosa_helper.go @@ -26,6 +26,7 @@ import ( "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/utils/ptr" + rosacontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/rosa/api/v1beta2" expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/rosa" ) @@ -43,13 +44,14 @@ func NodePoolToRosaMachinePoolSpec(nodePool *cmv1.NodePool) expinfrav1.RosaMachi TuningConfigs: nodePool.TuningConfigs(), AdditionalSecurityGroups: nodePool.AWSNodePool().AdditionalSecurityGroupIds(), VolumeSize: nodePool.AWSNodePool().RootVolume().Size(), + CapacityReservationID: nodePool.AWSNodePool().CapacityReservation().Id(), // nodePool.AWSNodePool().Tags() returns all tags including "system" tags if "fetchUserTagsOnly" parameter is not specified. // TODO: enable when AdditionalTags day2 changes is supported. 
 		// AdditionalTags: nodePool.AWSNodePool().Tags(),
 	}
 	if nodePool.Autoscaling() != nil {
-		spec.Autoscaling = &expinfrav1.RosaMachinePoolAutoScaling{
+		spec.Autoscaling = &rosacontrolplanev1.AutoScaling{
 			MinReplicas: nodePool.Autoscaling().MinReplica(),
 			MaxReplicas: nodePool.Autoscaling().MaxReplica(),
 		}
diff --git a/exp/utils/rosa_helper_test.go b/exp/utils/rosa_helper_test.go
index f298ea9be4..08191a72f5 100644
--- a/exp/utils/rosa_helper_test.go
+++ b/exp/utils/rosa_helper_test.go
@@ -27,6 +27,7 @@ import (
 	"k8s.io/apimachinery/pkg/util/intstr"
 	"k8s.io/utils/ptr"
 
+	rosacontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/rosa/api/v1beta2"
 	expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2"
 )
 
@@ -81,7 +82,7 @@ func TestNodePoolToRosaMachinePoolSpec(t *testing.T) {
 		TuningConfigs:            []string{"tuning1"},
 		AdditionalSecurityGroups: []string{"sg-123", "sg-456"},
 		VolumeSize:               120,
-		Autoscaling: &expinfrav1.RosaMachinePoolAutoScaling{
+		Autoscaling: &rosacontrolplanev1.AutoScaling{
 			MinReplicas: 2,
 			MaxReplicas: 5,
 		},
diff --git a/go.mod b/go.mod
index 1ffc472259..ad5ab78087 100644
--- a/go.mod
+++ b/go.mod
@@ -2,15 +2,18 @@ module sigs.k8s.io/cluster-api-provider-aws/v2
 
 go 1.24.0
 
+// Required to include https://github.com/kubernetes-sigs/cluster-api/pull/13022
+replace sigs.k8s.io/cluster-api/test => sigs.k8s.io/cluster-api/test v1.11.4-0.20251125201037-d322ff6baa2f
+
 require (
 	github.com/alessio/shellescape v1.4.2
 	github.com/apparentlymart/go-cidr v1.1.0
 	github.com/aws/amazon-vpc-cni-k8s v1.15.5
 	github.com/aws/aws-lambda-go v1.41.0
-	github.com/aws/aws-sdk-go-v2 v1.38.0
-	github.com/aws/aws-sdk-go-v2/config v1.31.0
-	github.com/aws/aws-sdk-go-v2/credentials v1.18.4
-	github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.18.4
+	github.com/aws/aws-sdk-go-v2 v1.39.2
+	github.com/aws/aws-sdk-go-v2/config v1.31.12
+	github.com/aws/aws-sdk-go-v2/credentials v1.18.16
+	github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.19.12
 	github.com/aws/aws-sdk-go-v2/service/autoscaling v1.52.4
 	github.com/aws/aws-sdk-go-v2/service/cloudtrail v1.52.0
 	github.com/aws/aws-sdk-go-v2/service/configservice v1.56.0
@@ -22,91 +25,111 @@ require (
 	github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2 v1.45.2
 	github.com/aws/aws-sdk-go-v2/service/iam v1.32.0
 	github.com/aws/aws-sdk-go-v2/service/resourcegroupstaggingapi v1.26.6
-	github.com/aws/aws-sdk-go-v2/service/s3 v1.87.0
+	github.com/aws/aws-sdk-go-v2/service/s3 v1.88.4
 	github.com/aws/aws-sdk-go-v2/service/secretsmanager v1.28.6
 	github.com/aws/aws-sdk-go-v2/service/ssm v1.59.1
-	github.com/aws/aws-sdk-go-v2/service/sts v1.37.0
-	github.com/aws/smithy-go v1.22.5
+	github.com/aws/aws-sdk-go-v2/service/sts v1.38.6
+	github.com/aws/smithy-go v1.23.0
 	github.com/awslabs/goformation/v4 v4.19.5
 	github.com/blang/semver v3.5.1+incompatible
 	github.com/coreos/ignition v0.35.0
-	github.com/coreos/ignition/v2 v2.16.2
-	github.com/go-logr/logr v1.4.2
+	github.com/coreos/ignition/v2 v2.24.0
+	github.com/go-logr/logr v1.4.3
 	github.com/gofrs/flock v0.8.1
 	github.com/golang/mock v1.6.0
 	github.com/google/go-cmp v0.7.0
 	github.com/google/goexpect v0.0.0-20210430020637-ab937bf7fd6f
-	github.com/google/gofuzz v1.2.0
-	github.com/onsi/ginkgo/v2 v2.23.3
-	github.com/onsi/gomega v1.36.3
-	github.com/openshift-online/ocm-common v0.0.29
-	github.com/openshift-online/ocm-sdk-go v0.1.465
-	github.com/openshift/rosa v1.2.55
+	github.com/onsi/ginkgo/v2 v2.23.4
+	github.com/onsi/gomega v1.38.0
+	github.com/openshift-online/ocm-api-model/clientapi v0.0.431
+	github.com/openshift-online/ocm-common v0.0.31
+	github.com/openshift-online/ocm-sdk-go v0.1.476
+	github.com/openshift/rosa v1.2.57
 	github.com/pkg/errors v0.9.1
-	github.com/prometheus/client_golang v1.19.1
+	github.com/prometheus/client_golang v1.23.0
 	github.com/sergi/go-diff v1.3.1
 	github.com/sirupsen/logrus v1.9.3
 	github.com/spf13/cobra v1.9.1
-	github.com/spf13/pflag v1.0.6
+	github.com/spf13/pflag v1.0.10
+	github.com/stretchr/testify v1.11.1
 	github.com/zgalor/weberr v0.8.2
-	golang.org/x/crypto v0.36.0
-	golang.org/x/net v0.38.0
-	golang.org/x/text v0.23.0
+	go.uber.org/mock v0.5.2
+	golang.org/x/crypto v0.45.0
+	golang.org/x/text v0.31.0
 	gopkg.in/yaml.v2 v2.4.0
-	k8s.io/api v0.32.3
-	k8s.io/apiextensions-apiserver v0.32.3
-	k8s.io/apimachinery v0.32.3
-	k8s.io/apiserver v0.32.3
-	k8s.io/cli-runtime v0.32.3
-	k8s.io/client-go v0.32.3
-	k8s.io/component-base v0.32.3
+	k8s.io/api v0.33.4
+	k8s.io/apiextensions-apiserver v0.33.4
+	k8s.io/apimachinery v0.33.4
+	k8s.io/cli-runtime v0.33.4
+	k8s.io/client-go v0.33.4
+	k8s.io/component-base v0.33.4
 	k8s.io/klog/v2 v2.130.1
-	k8s.io/kubectl v0.32.3
-	k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738
+	k8s.io/kubectl v0.33.4
+	k8s.io/utils v0.0.0-20250820121507-0af2bda4dd1d
 	sigs.k8s.io/aws-iam-authenticator v0.6.13
-	sigs.k8s.io/cluster-api v1.10.2
-	sigs.k8s.io/cluster-api/test v1.10.2
-	sigs.k8s.io/controller-runtime v0.20.4
-	sigs.k8s.io/yaml v1.4.0
+	sigs.k8s.io/cluster-api v1.11.1
+	sigs.k8s.io/cluster-api/test v1.11.1
+	sigs.k8s.io/controller-runtime v0.21.0
+	sigs.k8s.io/yaml v1.6.0
 )
 
-require github.com/aws/aws-sdk-go v1.55.7 // indirect
+require (
+	github.com/AlecAivazis/survey/v2 v2.2.15 // indirect
+	github.com/coreos/go-semver v0.3.1 // indirect
+	github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf // indirect
+	github.com/coreos/go-systemd/v22 v22.6.0 // indirect
+	github.com/coreos/vcontext v0.0.0-20230201181013-d72178a18687 // indirect
+	github.com/go-jose/go-jose/v4 v4.1.1 // indirect
+	github.com/itchyny/gojq v0.12.9 // indirect
+	github.com/itchyny/timefmt-go v0.1.4 // indirect
+	github.com/jackc/chunkreader/v2 v2.0.1 // indirect
+	github.com/jackc/pgconn v1.14.3 // indirect
+	github.com/jackc/pgio v1.0.0 // indirect
+	github.com/jackc/pgpassfile v1.0.0 // indirect
+	github.com/jackc/pgproto3/v2 v2.3.3 // indirect
+	github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a // indirect
+	github.com/jackc/pgtype v1.14.0 // indirect
+	github.com/jackc/pgx/v4 v4.18.3 // indirect
+	github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect
+	github.com/kylelemons/godebug v1.1.0 // indirect
+	github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d // indirect
+	github.com/openshift-online/ocm-api-model/model v0.0.431 // indirect
+	github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
+	github.com/robfig/cron/v3 v3.0.1 // indirect
+	github.com/vincent-petithory/dataurl v1.0.0 // indirect
+	golang.org/x/net v0.47.0 // indirect
+)
 
 require (
 	al.essio.dev/pkg/shellescape v1.5.1 // indirect
-	cel.dev/expr v0.18.0 // indirect
-	dario.cat/mergo v1.0.1 // indirect
+	cel.dev/expr v0.24.0 // indirect
 	github.com/99designs/go-keychain v0.0.0-20191008050251-8e49817e8af4 // indirect
 	github.com/99designs/keyring v1.2.2 // indirect
 	github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect
 	github.com/BurntSushi/toml v1.4.0 // indirect
 	github.com/MakeNowJust/heredoc v1.0.0 // indirect
-	github.com/Masterminds/goutils v1.1.1 // indirect
-	github.com/Masterminds/semver/v3 v3.3.0 // indirect
-	github.com/Masterminds/sprig/v3 v3.3.0 // indirect
 	github.com/Microsoft/go-winio v0.5.0 // indirect
 	github.com/NYTimes/gziphandler v1.1.1 // indirect
 	github.com/ProtonMail/go-crypto v0.0.0-20230217124315-7d5c6f04bbb8 // indirect
 	github.com/adrg/xdg v0.5.3 // indirect
 	github.com/antlr4-go/antlr/v4 v4.13.0 // indirect
-	github.com/asaskevich/govalidator v0.0.0-20200428143746-21a406dcc535 // indirect
-	github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.0 // indirect
-	github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.3 // indirect
-	github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.3 // indirect
-	github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.3 // indirect
+	github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.1 // indirect
+	github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.9 // indirect
+	github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.9 // indirect
+	github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.9 // indirect
 	github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 // indirect
-	github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.3 // indirect
+	github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.9 // indirect
 	github.com/aws/aws-sdk-go-v2/service/cloudformation v1.50.0
 	github.com/aws/aws-sdk-go-v2/service/eventbridge v1.39.3
-	github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.0 // indirect
-	github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.8.3 // indirect
-	github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.3 // indirect
-	github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.3 // indirect
+	github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.1 // indirect
+	github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.0 // indirect
+	github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.9 // indirect
+	github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.9 // indirect
 	github.com/aws/aws-sdk-go-v2/service/organizations v1.27.3 // indirect
 	github.com/aws/aws-sdk-go-v2/service/servicequotas v1.21.4
 	github.com/aws/aws-sdk-go-v2/service/sqs v1.38.8
-	github.com/aws/aws-sdk-go-v2/service/sso v1.28.0 // indirect
-	github.com/aws/aws-sdk-go-v2/service/ssooidc v1.33.0 // indirect
+	github.com/aws/aws-sdk-go-v2/service/sso v1.29.6 // indirect
+	github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.1 // indirect
 	github.com/aymerick/douceur v0.2.0 // indirect
 	github.com/beorn7/perks v1.0.1 // indirect
 	github.com/blang/semver/v4 v4.0.0 // indirect
@@ -114,15 +137,13 @@ require (
 	github.com/cenkalti/backoff/v4 v4.3.0 // indirect
 	github.com/cespare/xxhash/v2 v2.3.0 // indirect
 	github.com/cloudflare/circl v1.6.1 // indirect
-	github.com/coreos/go-semver v0.3.1 // indirect
-	github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf // indirect
-	github.com/coreos/go-systemd/v22 v22.5.0 // indirect
-	github.com/coreos/vcontext v0.0.0-20230201181013-d72178a18687 // indirect
+	github.com/containerd/errdefs v1.0.0 // indirect
+	github.com/containerd/errdefs/pkg v0.3.0 // indirect
 	github.com/cpuguy83/go-md2man/v2 v2.0.6 // indirect
 	github.com/danieljoos/wincred v1.2.0 // indirect
 	github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
 	github.com/distribution/reference v0.6.0 // indirect
-	github.com/docker/docker v28.0.2+incompatible // indirect
+	github.com/docker/docker v28.3.3+incompatible // indirect
 	github.com/docker/go-connections v0.5.0 // indirect
 	github.com/docker/go-units v0.5.0 // indirect
 	github.com/drone/envsubst/v2 v2.0.0-20210730161058-179042472c46 // indirect
@@ -146,22 +167,19 @@ require (
 	github.com/gogo/protobuf v1.3.2 // indirect
 	github.com/golang-jwt/jwt/v4 v4.5.2 // indirect
 	github.com/golang/glog v1.2.5 // indirect
-	github.com/golang/protobuf v1.5.4 // indirect
 	github.com/google/btree v1.1.3 // indirect
-	github.com/google/cel-go v0.22.0 // indirect
-	github.com/google/gnostic-models v0.6.9-0.20230804172637-c7be7c783f49 // indirect
+	github.com/google/cel-go v0.23.2 // indirect
+	github.com/google/gnostic-models v0.6.9 // indirect
 	github.com/google/go-github/v53 v53.2.0 // indirect
 	github.com/google/go-querystring v1.1.0 // indirect
 	github.com/google/goterm v0.0.0-20190703233501-fc88cf888a3f // indirect
-	github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad // indirect
-	github.com/google/safetext v0.0.0-20220905092116-b49f7bc46da2 // indirect
+	github.com/google/pprof v0.0.0-20250403155104-27863c87afa6 // indirect
 	github.com/google/uuid v1.6.0 // indirect
 	github.com/gorilla/css v1.0.1 // indirect
-	github.com/gorilla/websocket v1.5.3 // indirect
-	github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect
+	github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 // indirect
+	github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0 // indirect
 	github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c // indirect
 	github.com/hashicorp/go-version v1.6.0 // indirect
-	github.com/huandu/xstrings v1.5.0 // indirect
 	github.com/imdario/mergo v0.3.13 // indirect
 	github.com/inconshreveable/mousetrap v1.1.0 // indirect
 	github.com/jmespath/go-jmespath v0.4.0 // indirect
@@ -172,12 +190,11 @@ require (
 	github.com/mattn/go-colorable v0.1.13 // indirect
 	github.com/mattn/go-isatty v0.0.20 // indirect
 	github.com/mattn/go-runewidth v0.0.14 // indirect
-	github.com/microcosm-cc/bluemonday v1.0.26 // indirect
-	github.com/mitchellh/copystructure v1.2.0 // indirect
+	github.com/microcosm-cc/bluemonday v1.0.27 // indirect
 	github.com/mitchellh/go-wordwrap v1.0.1 // indirect
-	github.com/mitchellh/reflectwalk v1.0.2 // indirect
 	github.com/moby/docker-image-spec v1.3.1 // indirect
 	github.com/moby/spdystream v0.5.0 // indirect
+	github.com/moby/sys/sequential v0.6.0 // indirect
 	github.com/moby/term v0.5.0 // indirect
 	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
 	github.com/modern-go/reflect2 v1.0.2 // indirect
@@ -190,57 +207,59 @@ require (
 	github.com/opencontainers/image-spec v1.1.0-rc5 // indirect
 	github.com/pelletier/go-toml v1.9.5 // indirect
 	github.com/pelletier/go-toml/v2 v2.2.3 // indirect
-	github.com/prometheus/client_model v0.6.1 // indirect
-	github.com/prometheus/common v0.55.0 // indirect
-	github.com/prometheus/procfs v0.15.1 // indirect
+	github.com/prometheus/client_model v0.6.2 // indirect
+	github.com/prometheus/common v0.65.0 // indirect
+	github.com/prometheus/procfs v0.17.0 // indirect
 	github.com/rivo/uniseg v0.4.2 // indirect
 	github.com/russross/blackfriday/v2 v2.1.0 // indirect
 	github.com/sagikazarmark/locafero v0.7.0 // indirect
 	github.com/sanathkr/go-yaml v0.0.0-20170819195128-ed9d249f429b // indirect
 	github.com/sanathkr/yaml v0.0.0-20170819201035-0056894fa522 // indirect
-	github.com/shopspring/decimal v1.4.0 // indirect
 	github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 // indirect
 	github.com/sourcegraph/conc v0.3.0 // indirect
 	github.com/spf13/afero v1.12.0 // indirect
 	github.com/spf13/cast v1.7.1 // indirect
-	github.com/spf13/viper v1.20.0 // indirect
+	github.com/spf13/viper v1.20.1 // indirect
 	github.com/stoewer/go-strcase v1.3.0 // indirect
 	github.com/subosito/gotenv v1.6.0 // indirect
-	github.com/valyala/fastjson v1.6.4 // indirect
-	github.com/vincent-petithory/dataurl v1.0.0 // indirect
 	github.com/x448/float16 v0.8.4 // indirect
 	github.com/zalando/go-keyring v0.2.3 // indirect
 	gitlab.com/c0b/go-ordered-json v0.0.0-20201030195603-febf46534d5a // indirect
-	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 // indirect
-	go.opentelemetry.io/otel v1.29.0 // indirect
-	go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 // indirect
-	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0 // indirect
-	go.opentelemetry.io/otel/metric v1.29.0 // indirect
-	go.opentelemetry.io/otel/sdk v1.29.0 // indirect
-	go.opentelemetry.io/otel/trace v1.29.0 // indirect
-	go.opentelemetry.io/proto/otlp v1.3.1 // indirect
-	go.uber.org/mock v0.5.2 // indirect
+	go.opentelemetry.io/auto/sdk v1.1.0 // indirect
+	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 // indirect
+	go.opentelemetry.io/otel v1.37.0 // indirect
+	go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0 // indirect
+	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.33.0 // indirect
+	go.opentelemetry.io/otel/metric v1.37.0 // indirect
+	go.opentelemetry.io/otel/sdk v1.37.0 // indirect
+	go.opentelemetry.io/otel/trace v1.37.0 // indirect
+	go.opentelemetry.io/proto/otlp v1.4.0 // indirect
+	go.uber.org/automaxprocs v1.6.0 // indirect
 	go.uber.org/multierr v1.11.0 // indirect
 	go.uber.org/zap v1.27.0 // indirect
+	go.yaml.in/yaml/v2 v2.4.2 // indirect
+	go.yaml.in/yaml/v3 v3.0.4 // indirect
 	golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect
-	golang.org/x/oauth2 v0.28.0 // indirect
-	golang.org/x/sync v0.12.0 // indirect
-	golang.org/x/sys v0.31.0 // indirect
-	golang.org/x/term v0.30.0 // indirect
-	golang.org/x/time v0.8.0 // indirect
-	golang.org/x/tools v0.30.0 // indirect
+	golang.org/x/oauth2 v0.32.0 // indirect
+	golang.org/x/sync v0.18.0 // indirect
+	golang.org/x/sys v0.38.0 // indirect
+	golang.org/x/term v0.37.0 // indirect
+	golang.org/x/time v0.13.0 // indirect
+	golang.org/x/tools v0.38.0 // indirect
 	gomodules.xyz/jsonpatch/v2 v2.5.0 // indirect
-	google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576 // indirect
-	google.golang.org/genproto/googleapis/rpc v0.0.0-20241223144023-3abc09e42ca8 // indirect
-	google.golang.org/grpc v1.67.3 // indirect
-	google.golang.org/protobuf v1.36.5 // indirect
+	google.golang.org/genproto/googleapis/api v0.0.0-20250818200422-3122310a409c // indirect
+	google.golang.org/genproto/googleapis/rpc v0.0.0-20251002232023-7c0ddcbb5797 // indirect
+	google.golang.org/grpc v1.75.1 // indirect
+	google.golang.org/protobuf v1.36.10 // indirect
 	gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
 	gopkg.in/inf.v0 v0.9.1 // indirect
 	gopkg.in/yaml.v3 v3.0.1 // indirect
-	k8s.io/cluster-bootstrap v0.32.3 // indirect
-	k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f // indirect
-	sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.0 // indirect
+	k8s.io/apiserver v0.33.4
+	k8s.io/cluster-bootstrap v0.33.3 // indirect
+	k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff // indirect
+	sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.2 // indirect
 	sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect
-	sigs.k8s.io/kind v0.27.0 // indirect
-	sigs.k8s.io/structured-merge-diff/v4 v4.4.2 // indirect
+	sigs.k8s.io/kind v0.30.0 // indirect
+	sigs.k8s.io/randfill v1.0.0
+	sigs.k8s.io/structured-merge-diff/v4 v4.6.0 // indirect
 )
diff --git a/go.sum b/go.sum
index 61860ee4e4..65adb705a5 100644
--- a/go.sum
+++ b/go.sum
@@ -1,7 +1,7 @@
 al.essio.dev/pkg/shellescape v1.5.1 h1:86HrALUujYS/h+GtqoB26SBEdkWfmMI6FubjXlsXyho=
 al.essio.dev/pkg/shellescape v1.5.1/go.mod h1:6sIqp7X2P6mThCQ7twERpZTuigpr6KbZWtls1U8I890=
-cel.dev/expr v0.18.0 h1:CJ6drgk+Hf96lkLikr4rFf19WrU0BOWEihyZnI2TAzo=
-cel.dev/expr v0.18.0/go.mod h1:MrpN08Q+lEBs+bGYdLxxHkZoUSsCp0nSKTs0nTymJgw=
+cel.dev/expr v0.24.0 h1:56OvJKSH3hDGL0ml5uSxZmz3/3Pq4tJ+fb1unVLAFcY=
+cel.dev/expr v0.24.0/go.mod h1:hLPLo1W4QUmuYdA72RBX06QTs6MXw941piREPl3Yfiw=
 cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
 dario.cat/mergo v1.0.1 h1:Ra4+bf83h2ztPIQYNP99R6m+Y7KfnARDfID+a+vLl4s=
 dario.cat/mergo v1.0.1/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk=
@@ -20,6 +20,8 @@ github.com/MakeNowJust/heredoc v1.0.0 h1:cXCdzVdstXyiTqTvfqk9SDHpKNjxuom+DOlyEeQ
 github.com/MakeNowJust/heredoc v1.0.0/go.mod h1:mG5amYoWBHf8vpLOuehzbGGw0EHxpZZ6lCpQ4fNJ8LE=
 github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI=
 github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU=
+github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww=
+github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs=
 github.com/Masterminds/semver/v3 v3.3.0 h1:B8LGeaivUe71a5qox1ICM/JLl0NqZSW5CHyL+hmvYS0=
 github.com/Masterminds/semver/v3 v3.3.0/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM=
 github.com/Masterminds/sprig/v3 v3.3.0 h1:mQh0Yrg1XPo6vjYXgtf5OtijNAKJRNcTdOOGZe3tPhs=
@@ -28,6 +30,8 @@ github.com/Microsoft/go-winio v0.5.0 h1:Elr9Wn+sGKPlkaBvwu4mTrxtmOp3F3yV9qhaHbXG
 github.com/Microsoft/go-winio v0.5.0/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
 github.com/NYTimes/gziphandler v1.1.1 h1:ZUDjpQae29j0ryrS0u/B8HZfJBtBQHjqw2rQ2cqUQ3I=
 github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c=
+github.com/Netflix/go-expect v0.0.0-20180615182759-c93bf25de8e8 h1:xzYJEypr/85nBpB11F9br+3HUrpgb+fcm5iADzXXYEw=
+github.com/Netflix/go-expect v0.0.0-20180615182759-c93bf25de8e8/go.mod h1:oX5x61PbNXchhh0oikYAH+4Pcfw5LKv21+Jnpr6r6Pc=
 github.com/ProtonMail/go-crypto v0.0.0-20230217124315-7d5c6f04bbb8 h1:wPbRQzjjwFc0ih8puEVAOFGELsn1zoIIYdxvML7mDxA=
 github.com/ProtonMail/go-crypto v0.0.0-20230217124315-7d5c6f04bbb8/go.mod h1:I0gYDMZ6Z5GRU7l58bNFSkPTFN6Yl12dsUlAZ8xy98g=
 github.com/adrg/xdg v0.5.3 h1:xRnxJXne7+oWDatRhR1JLnvuccuIeCoBu2rtuLqQB78=
@@ -40,34 +44,30 @@ github.com/apparentlymart/go-cidr v1.1.0 h1:2mAhrMoF+nhXqxTzSZMUzDHkLjmIHC+Zzn4t
 github.com/apparentlymart/go-cidr v1.1.0/go.mod h1:EBcsNrHc3zQeuaeCeCtQruQm+n9/YjEn/vI25Lg7Gwc=
 github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio=
 github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
-github.com/asaskevich/govalidator v0.0.0-20200428143746-21a406dcc535 h1:4daAzAu0S6Vi7/lbWECcX0j45yZReDZ56BQsrVBOEEY=
-github.com/asaskevich/govalidator v0.0.0-20200428143746-21a406dcc535/go.mod h1:oGkLhpf+kjZl6xBf758TQhh5XrAeiJv/7FRz/2spLIg=
 github.com/aws/amazon-vpc-cni-k8s v1.15.5 h1:/mqTXB4HoGYg4CiU4Gco9iEvZ+V/309Na4HEMPgok5Q=
 github.com/aws/amazon-vpc-cni-k8s v1.15.5/go.mod h1:jV4wNtmgT2Ra1/oZU99DPOFsCUKnf0mYfIyzDyAUVAY=
 github.com/aws/aws-lambda-go v1.41.0 h1:l/5fyVb6Ud9uYd411xdHZzSf2n86TakxzpvIoz7l+3Y=
 github.com/aws/aws-lambda-go v1.41.0/go.mod h1:jwFe2KmMsHmffA1X2R09hH6lFzJQxzI8qK17ewzbQMM=
-github.com/aws/aws-sdk-go v1.55.7 h1:UJrkFq7es5CShfBwlWAC8DA077vp8PyVbQd3lqLiztE=
-github.com/aws/aws-sdk-go v1.55.7/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU=
-github.com/aws/aws-sdk-go-v2 v1.38.0 h1:UCRQ5mlqcFk9HJDIqENSLR3wiG1VTWlyUfLDEvY7RxU=
-github.com/aws/aws-sdk-go-v2 v1.38.0/go.mod h1:9Q0OoGQoboYIAJyslFyF1f5K1Ryddop8gqMhWx/n4Wg=
-github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.0 h1:6GMWV6CNpA/6fbFHnoAjrv4+LGfyTqZz2LtCHnspgDg=
-github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.0/go.mod h1:/mXlTIVG9jbxkqDnr5UQNQxW1HRYxeGklkM9vAFeabg=
-github.com/aws/aws-sdk-go-v2/config v1.31.0 h1:9yH0xiY5fUnVNLRWO0AtayqwU1ndriZdN78LlhruJR4=
-github.com/aws/aws-sdk-go-v2/config v1.31.0/go.mod h1:VeV3K72nXnhbe4EuxxhzsDc/ByrCSlZwUnWH52Nde/I=
-github.com/aws/aws-sdk-go-v2/credentials v1.18.4 h1:IPd0Algf1b+Qy9BcDp0sCUcIWdCQPSzDoMK3a8pcbUM=
-github.com/aws/aws-sdk-go-v2/credentials v1.18.4/go.mod h1:nwg78FjH2qvsRM1EVZlX9WuGUJOL5od+0qvm0adEzHk=
-github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.3 h1:GicIdnekoJsjq9wqnvyi2elW6CGMSYKhdozE7/Svh78=
-github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.3/go.mod h1:R7BIi6WNC5mc1kfRM7XM/VHC3uRWkjc396sfabq4iOo=
-github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.18.4 h1:0SzCLoPRSK3qSydsaFQWugP+lOBCTPwfcBOm6222+UA=
-github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.18.4/go.mod h1:JAet9FsBHjfdI+TnMBX4ModNNaQHAd3dc/Bk+cNsxeM=
-github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.3 h1:o9RnO+YZ4X+kt5Z7Nvcishlz0nksIt2PIzDglLMP0vA=
-github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.3/go.mod h1:+6aLJzOG1fvMOyzIySYjOFjcguGvVRL68R+uoRencN4=
-github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.3 h1:joyyUFhiTQQmVK6ImzNU9TQSNRNeD9kOklqTzyk5v6s=
-github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.3/go.mod h1:+vNIyZQP3b3B1tSLI0lxvrU9cfM7gpdRXMFfm67ZcPc=
+github.com/aws/aws-sdk-go-v2 v1.39.2 h1:EJLg8IdbzgeD7xgvZ+I8M1e0fL0ptn/M47lianzth0I=
+github.com/aws/aws-sdk-go-v2 v1.39.2/go.mod h1:sDioUELIUO9Znk23YVmIk86/9DOpkbyyVb1i/gUNFXY=
+github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.1 h1:i8p8P4diljCr60PpJp6qZXNlgX4m2yQFpYk+9ZT+J4E=
+github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.1/go.mod h1:ddqbooRZYNoJ2dsTwOty16rM+/Aqmk/GOXrK8cg7V00=
+github.com/aws/aws-sdk-go-v2/config v1.31.12 h1:pYM1Qgy0dKZLHX2cXslNacbcEFMkDMl+Bcj5ROuS6p8=
+github.com/aws/aws-sdk-go-v2/config v1.31.12/go.mod h1:/MM0dyD7KSDPR+39p9ZNVKaHDLb9qnfDurvVS2KAhN8=
+github.com/aws/aws-sdk-go-v2/credentials v1.18.16 h1:4JHirI4zp958zC026Sm+V4pSDwW4pwLefKrc0bF2lwI=
+github.com/aws/aws-sdk-go-v2/credentials v1.18.16/go.mod h1:qQMtGx9OSw7ty1yLclzLxXCRbrkjWAM7JnObZjmCB7I=
+github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.9 h1:Mv4Bc0mWmv6oDuSWTKnk+wgeqPL5DRFu5bQL9BGPQ8Y=
+github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.9/go.mod h1:IKlKfRppK2a1y0gy1yH6zD+yX5uplJ6UuPlgd48dJiQ=
+github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.19.12 h1:ofHawDLJTI6ytDIji+g4dXQ6u2idzTb04tDlN9AS614=
+github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.19.12/go.mod h1:f5pL4iLDfbcxj1SZcdRdIokBB5eHbuYPS/Fs9DwUPRQ=
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.9 h1:se2vOWGD3dWQUtfn4wEjRQJb1HK1XsNIt825gskZ970=
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.9/go.mod h1:hijCGH2VfbZQxqCDN7bwz/4dzxV+hkyhjawAtdPWKZA=
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.9 h1:6RBnKZLkJM4hQ+kN6E7yWFveOTg8NLPHAkqrs4ZPlTU=
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.9/go.mod h1:V9rQKRmK7AWuEsOMnHzKj8WyrIir1yUJbZxDuZLFvXI=
 github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 h1:bIqFDwgGXXN1Kpp99pDOdKMTTb5d2KyU5X/BZxjOkRo=
 github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3/go.mod h1:H5O/EsxDWyU+LP/V8i5sm8cxoZgc2fdNR9bxlOFrQTo=
-github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.3 h1:ZV2XK2L3HBq9sCKQiQ/MdhZJppH/rH0vddEAamsHUIs=
-github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.3/go.mod h1:b9F9tk2HdHpbf3xbN7rUZcfmJI26N6NcJu/8OsBFI/0=
+github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.9 h1:w9LnHqTq8MEdlnyhV4Bwfizd65lfNCNgdlNC6mM5paE=
+github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.9/go.mod h1:LGEP6EK4nj+bwWNdrvX/FnDTFowdBNwcSPuZu/ouFys=
 github.com/aws/aws-sdk-go-v2/service/autoscaling v1.52.4 h1:vzLD0FyNU4uxf2QE5UDG0jSEitiJXbVEUwf2Sk3usF4=
 github.com/aws/aws-sdk-go-v2/service/autoscaling v1.52.4/go.mod h1:CDqMoc3KRdZJ8qziW96J35lKH01Wq3B2aihtHj2JbRs=
 github.com/aws/aws-sdk-go-v2/service/cloudformation v1.50.0 h1:Ap5tOJfeAH1hO2UQc3X3uMlwP7uryFeZXMvZCXIlLSE=
@@ -92,20 +92,20 @@ github.com/aws/aws-sdk-go-v2/service/eventbridge v1.39.3 h1:T6L7fsONflMeXuvsT8qZ
 github.com/aws/aws-sdk-go-v2/service/eventbridge v1.39.3/go.mod h1:sIrUII6Z+hAVAgcpmsc2e9HvEr++m/v8aBPT7s4ZYUk=
 github.com/aws/aws-sdk-go-v2/service/iam v1.32.0 h1:ZNlfPdw849gBo/lvLFbEEvpTJMij0LXqiNWZ+lIamlU=
 github.com/aws/aws-sdk-go-v2/service/iam v1.32.0/go.mod h1:aXWImQV0uTW35LM0A/T4wEg6R1/ReXUu4SM6/lUHYK0=
-github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.0 h1:6+lZi2JeGKtCraAj1rpoZfKqnQ9SptseRZioejfUOLM=
-github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.0/go.mod h1:eb3gfbVIxIoGgJsi9pGne19dhCBpK6opTYpQqAmdy44=
-github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.8.3 h1:3ZKmesYBaFX33czDl6mbrcHb6jeheg6LqjJhQdefhsY=
-github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.8.3/go.mod h1:7ryVb78GLCnjq7cw45N6oUb9REl7/vNUwjvIqC5UgdY=
-github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.3 h1:ieRzyHXypu5ByllM7Sp4hC5f/1Fy5wqxqY0yB85hC7s=
-github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.3/go.mod h1:O5ROz8jHiOAKAwx179v+7sHMhfobFVi6nZt8DEyiYoM=
-github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.3 h1:SE/e52dq9a05RuxzLcjT+S5ZpQobj3ie3UTaSf2NnZc=
-github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.3/go.mod h1:zkpvBTsR020VVr8TOrwK2TrUW9pOir28sH5ECHpnAfo=
+github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.1 h1:oegbebPEMA/1Jny7kvwejowCaHz1FWZAQ94WXFNCyTM=
+github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.1/go.mod h1:kemo5Myr9ac0U9JfSjMo9yHLtw+pECEHsFtJ9tqCEI8=
+github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.0 h1:X0FveUndcZ3lKbSpIC6rMYGRiQTcUVRNH6X4yYtIrlU=
+github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.0/go.mod h1:IWjQYlqw4EX9jw2g3qnEPPWvCE6bS8fKzhMed1OK7c8=
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.9 h1:5r34CgVOD4WZudeEKZ9/iKpiT6cM1JyEROpXjOcdWv8=
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.9/go.mod h1:dB12CEbNWPbzO2uC6QSWHteqOg4JfBVJOojbAoAUb5I=
+github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.9 h1:wuZ5uW2uhJR63zwNlqWH2W4aL4ZjeJP3o92/W+odDY4=
+github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.9/go.mod h1:/G58M2fGszCrOzvJUkDdY8O9kycodunH4VdT5oBAqls=
 github.com/aws/aws-sdk-go-v2/service/organizations v1.27.3 h1:CnPWlONzFX9/yO6IGuKg9sWUE8WhKztYRFbhmOHXjJI=
 github.com/aws/aws-sdk-go-v2/service/organizations v1.27.3/go.mod h1:hUHSXe9HFEmLfHrXndAX5e69rv0nBsg22VuNQYl0JLM=
 github.com/aws/aws-sdk-go-v2/service/resourcegroupstaggingapi v1.26.6 h1:PwbxovpcJvb25k019bkibvJfCpCmIANOFrXZIFPmRzk=
 github.com/aws/aws-sdk-go-v2/service/resourcegroupstaggingapi v1.26.6/go.mod h1:Z4xLt5mXspLKjBV92i165wAJ/3T6TIv4n7RtIS8pWV0=
-github.com/aws/aws-sdk-go-v2/service/s3 v1.87.0 h1:egoDf+Geuuntmw79Mz6mk9gGmELCPzg5PFEABOHB+6Y=
-github.com/aws/aws-sdk-go-v2/service/s3 v1.87.0/go.mod h1:t9MDi29H+HDbkolTSQtbI0HP9DemAWQzUjmWC7LGMnE=
+github.com/aws/aws-sdk-go-v2/service/s3 v1.88.4 h1:mUI3b885qJgfqKDUSj6RgbRqLdX0wGmg8ruM03zNfQA=
+github.com/aws/aws-sdk-go-v2/service/s3 v1.88.4/go.mod h1:6v8ukAxc7z4x4oBjGUsLnH7KGLY9Uhcgij19UJNkiMg=
 github.com/aws/aws-sdk-go-v2/service/secretsmanager v1.28.6 h1:TIOEjw0i2yyhmhRry3Oeu9YtiiHWISZ6j/irS1W3gX4=
 github.com/aws/aws-sdk-go-v2/service/secretsmanager v1.28.6/go.mod h1:3Ba++UwWd154xtP4FRX5pUK3Gt4up5sDHCve6kVfE+g=
 github.com/aws/aws-sdk-go-v2/service/servicequotas v1.21.4 h1:SSDkZRAO8Ok5SoQ4BJ0onDeb0ga8JBOCkUmNEpRChcw=
@@ -114,14 +114,14 @@ github.com/aws/aws-sdk-go-v2/service/sqs v1.38.8 h1:80dpSqWMwx2dAm30Ib7J6ucz1ZHf
 github.com/aws/aws-sdk-go-v2/service/sqs v1.38.8/go.mod h1:IzNt/udsXlETCdvBOL0nmyMe2t9cGmXmZgsdoZGYYhI=
 github.com/aws/aws-sdk-go-v2/service/ssm v1.59.1 h1:Z4cmgV3hKuUIkhJsdn47hf/ABYHUtILfMrV+L8+kRwE=
 github.com/aws/aws-sdk-go-v2/service/ssm v1.59.1/go.mod h1:PUWUl5MDiYNQkUHN9Pyd9kgtA/YhbxnSnHP+yQqzrM8=
-github.com/aws/aws-sdk-go-v2/service/sso v1.28.0 h1:Mc/MKBf2m4VynyJkABoVEN+QzkfLqGj0aiJuEe7cMeM=
-github.com/aws/aws-sdk-go-v2/service/sso v1.28.0/go.mod h1:iS5OmxEcN4QIPXARGhavH7S8kETNL11kym6jhoS7IUQ=
-github.com/aws/aws-sdk-go-v2/service/ssooidc v1.33.0 h1:6csaS/aJmqZQbKhi1EyEMM7yBW653Wy/B9hnBofW+sw=
-github.com/aws/aws-sdk-go-v2/service/ssooidc v1.33.0/go.mod h1:59qHWaY5B+Rs7HGTuVGaC32m0rdpQ68N8QCN3khYiqs=
-github.com/aws/aws-sdk-go-v2/service/sts v1.37.0 h1:MG9VFW43M4A8BYeAfaJJZWrroinxeTi2r3+SnmLQfSA=
-github.com/aws/aws-sdk-go-v2/service/sts v1.37.0/go.mod h1:JdeBDPgpJfuS6rU/hNglmOigKhyEZtBmbraLE4GK1J8=
-github.com/aws/smithy-go v1.22.5 h1:P9ATCXPMb2mPjYBgueqJNCA5S9UfktsW0tTxi+a7eqw=
-github.com/aws/smithy-go v1.22.5/go.mod h1:t1ufH5HMublsJYulve2RKmHDC15xu1f26kHCp/HgceI=
+github.com/aws/aws-sdk-go-v2/service/sso v1.29.6 h1:A1oRkiSQOWstGh61y4Wc/yQ04sqrQZr1Si/oAXj20/s=
+github.com/aws/aws-sdk-go-v2/service/sso v1.29.6/go.mod h1:5PfYspyCU5Vw1wNPsxi15LZovOnULudOQuVxphSflQA=
+github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.1 h1:5fm5RTONng73/QA73LhCNR7UT9RpFH3hR6HWL6bIgVY=
+github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.1/go.mod h1:xBEjWD13h+6nq+z4AkqSfSvqRKFgDIQeaMguAJndOWo=
+github.com/aws/aws-sdk-go-v2/service/sts v1.38.6 h1:p3jIvqYwUZgu/XYeI48bJxOhvm47hZb5HUQ0tn6Q9kA=
+github.com/aws/aws-sdk-go-v2/service/sts v1.38.6/go.mod h1:WtKK+ppze5yKPkZ0XwqIVWD4beCwv056ZbPQNoeHqM8=
+github.com/aws/smithy-go v1.23.0 h1:8n6I3gXzWJB2DxBDnfxgBaSX6oe0d/t10qGz7OKqMCE=
+github.com/aws/smithy-go v1.23.0/go.mod h1:t1ufH5HMublsJYulve2RKmHDC15xu1f26kHCp/HgceI=
 github.com/awslabs/goformation/v4 v4.19.5 h1:Y+Tzh01tWg8gf//AgGKUamaja7Wx9NPiJf1FpZu4/iU=
 github.com/awslabs/goformation/v4 v4.19.5/go.mod h1:JoNpnVCBOUtEz9bFxc9sjy8uBUCLF5c4D1L7RhRTVM8=
 github.com/aymerick/douceur v0.2.0 h1:Mv+mAeH1Q+n9Fr+oyamOlAkUNPWPlA8PPGR0QAaYuPk=
@@ -145,28 +145,37 @@ github.com/cloudflare/circl v1.1.0/go.mod h1:prBCrKB9DV4poKZY1l9zBXg2QJY7mvgRvtM
 github.com/cloudflare/circl v1.6.1 h1:zqIqSPIndyBh1bjLVVDHMPpVKqp8Su/V+6MeDzzQBQ0=
 github.com/cloudflare/circl v1.6.1/go.mod h1:uddAzsPgqdMAYatqJ0lsjX1oECcQLIlRpzZh3pJrofs=
 github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
+github.com/cockroachdb/apd v1.1.0 h1:3LFP3629v+1aKXU5Q37mxmRxX/pIu1nijXydLShEq5I=
+github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ=
+github.com/containerd/errdefs v1.0.0 h1:tg5yIfIlQIrxYtu9ajqY42W3lpS19XqdxRQeEwYG8PI=
+github.com/containerd/errdefs v1.0.0/go.mod h1:+YBYIdtsnF4Iw6nWZhJcqGSg/dwvV7tyJ/kCkyJ2k+M=
+github.com/containerd/errdefs/pkg v0.3.0 h1:9IKJ06FvyNlexW690DXuQNx2KA2cUJXx151Xdx3ZPPE=
+github.com/containerd/errdefs/pkg v0.3.0/go.mod h1:NJw6s9HwNuRhnjJhM7pylWwMyAkmCQvQ4GpJHEqRLVk=
 github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I=
 github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo=
 github.com/coredns/caddy v1.1.1 h1:2eYKZT7i6yxIfGP3qLJoJ7HAsDJqYB+X68g4NYjSrE0=
 github.com/coredns/caddy v1.1.1/go.mod h1:A6ntJQlAWuQfFlsd9hvigKbo2WS0VUs2l1e2F+BawD4=
-github.com/coredns/corefile-migration v1.0.26 h1:xiiEkVB1Dwolb24pkeDUDBfygV9/XsOSq79yFCrhptY=
-github.com/coredns/corefile-migration v1.0.26/go.mod h1:56DPqONc3njpVPsdilEnfijCwNGC3/kTJLl7i7SPavY=
+github.com/coredns/corefile-migration v1.0.27 h1:WIIw5sU0LfGgoGnhdrYdVcto/aWmJoGA/C62iwkU0JM=
+github.com/coredns/corefile-migration v1.0.27/go.mod h1:56DPqONc3njpVPsdilEnfijCwNGC3/kTJLl7i7SPavY=
 github.com/coreos/go-json v0.0.0-20230131223807-18775e0fb4fb h1:rmqyI19j3Z/74bIRhuC59RB442rXUazKNueVpfJPxg4=
 github.com/coreos/go-json v0.0.0-20230131223807-18775e0fb4fb/go.mod h1:rcFZM3uxVvdyNmsAV2jopgPD1cs5SPWJWU5dOz2LUnw=
 github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4=
 github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec=
+github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
+github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
 github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf h1:iW4rZ826su+pqaw19uhpSCzhj44qo35pNgKFGqzDKkU=
 github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
-github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs=
-github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
+github.com/coreos/go-systemd/v22 v22.6.0 h1:aGVa/v8B7hpb0TKl0MWoAavPDmHvobFe5R5zn0bCJWo=
+github.com/coreos/go-systemd/v22 v22.6.0/go.mod h1:iG+pp635Fo7ZmV/j14KUcmEyWF+0X7Lua8rrTWzYgWU=
 github.com/coreos/ignition v0.35.0 h1:UFodoYq1mOPrbEjtxIsZbThcDyQwAI1owczRDqWmKkQ=
 github.com/coreos/ignition v0.35.0/go.mod h1:WJQapxzEn9DE0ryxsGvm8QnBajm/XsS/PkrDqSpz+bA=
-github.com/coreos/ignition/v2 v2.16.2 h1:wPpxTovdzCLJISYmNiM5Cpw4qCPc3/P2ibruPyS46eA=
-github.com/coreos/ignition/v2 v2.16.2/go.mod h1:Y1BKC60VSNgA5oWNoLIHXigpFX1FFn4CVeimmsI+Bhg=
+github.com/coreos/ignition/v2 v2.24.0 h1:TVcsSWiYvhXihD8Mss3CTRuKaNZM2OIfpoKiudIhrKo=
+github.com/coreos/ignition/v2 v2.24.0/go.mod h1:HelGgFZ1WZ4ZPOIDS0a06A2JTdbbdAine5r3AkSYz5s=
 github.com/coreos/vcontext v0.0.0-20230201181013-d72178a18687 h1:uSmlDgJGbUB0bwQBcZomBTottKwEDF5fF8UjSwKSzWM=
 github.com/coreos/vcontext v0.0.0-20230201181013-d72178a18687/go.mod h1:Salmysdw7DAVuobBW/LwsKKgpyCPHUhjyJoMJD+ZJiI=
 github.com/cpuguy83/go-md2man/v2 v2.0.6 h1:XJtiaUW6dEEqVuZiMTn1ldk455QWwEIsMIJlo5vtkx0=
 github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
+github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
 github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
 github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY=
 github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4=
@@ -178,16 +187,14 @@ github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1
 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk=
 github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
-github.com/docker/docker v28.0.2+incompatible h1:9BILleFwug5FSSqWBgVevgL3ewDJfWWWyZVqlDMttE8=
-github.com/docker/docker v28.0.2+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker v28.3.3+incompatible h1:Dypm25kh4rmk49v1eiVbsAtpAsYURjYkaKubwuBdxEI=
+github.com/docker/docker v28.3.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
 github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c=
 github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc=
 github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
 github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
 github.com/drone/envsubst/v2 v2.0.0-20210730161058-179042472c46 h1:7QPwrLT79GlD5sizHf27aoY2RTvw62mO6x7mxkScNk0=
 github.com/drone/envsubst/v2 v2.0.0-20210730161058-179042472c46/go.mod h1:esf2rsHFNlZlxsqsZDojNBcnNs5REqIvRrWRHqX0vEU=
-github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
-github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
 github.com/dvsekhvalnov/jose2go v1.6.0 h1:Y9gnSnP4qEI0+/uQkHvFXeD2PLPJeXEL+ySMEA2EjTY=
 github.com/dvsekhvalnov/jose2go v1.6.0/go.mod h1:QsHjhyTlD/lAVqn/NSbVZmSCGeDehTB/mPZadG+mhXU=
 github.com/emicklei/go-restful/v3 v3.12.2 h1:DhwDP0vY3k8ZzE0RunuJy8GhNpPL6zqLkDf9B/a0/xU=
@@ -212,9 +219,13 @@ github.com/fsnotify/fsnotify v1.8.0 h1:dAwr6QBTBZIkG8roQaJjGof0pp0EeF+tNV7YBP3F/
 github.com/fsnotify/fsnotify v1.8.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0=
 github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E=
 github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ=
+github.com/go-jose/go-jose/v4 v4.1.1 h1:JYhSgy4mXXzAdF3nUx3ygx347LRXJRrpgyU3adRmkAI=
+github.com/go-jose/go-jose/v4 v4.1.1/go.mod h1:BdsZGqgdO3b6tTc6LSE56wcDbMMLuPsw5d4ZD5f94kA=
+github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
+github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
 github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
-github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
-github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
+github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI=
+github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
 github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
 github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
 github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ=
@@ -227,6 +238,7 @@ github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En
 github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
 github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE=
 github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ=
+github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
 github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
 github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
 github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
@@ -236,11 +248,12 @@ github.com/gobuffalo/flect v1.0.3 h1:xeWBM2nui+qnVvNM4S3foBhCAL2XgPU+a7FdpelbTq4
 github.com/gobuffalo/flect v1.0.3/go.mod h1:A5msMlrHtLqh9umBSnvabjsMrCcCpAyzglnDvkbYKHs=
 github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2 h1:ZpnhV/YsD2/4cESfV5+Hoeu/iUR3ruzNvZ+yQfO03a0=
 github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4=
-github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
 github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk=
 github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
 github.com/gofrs/flock v0.8.1 h1:+gYjHKf32LDeiEEFhQaotPbLuUXjY5ZqxKgXy7n59aw=
 github.com/gofrs/flock v0.8.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU=
+github.com/gofrs/uuid v4.0.0+incompatible h1:1SD/1F5pU8p29ybwgQSwpQk+mwdRrXCYuPhW6m+TnJw=
+github.com/gofrs/uuid v4.0.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
 github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
 github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
 github.com/golang-jwt/jwt/v4 v4.5.2 h1:YtQM7lnr8iZ+j5q71MGKkNw9Mn7AjHM68uc9g5fXeUI=
@@ -266,10 +279,10 @@ github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek
 github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
 github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg=
 github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4=
-github.com/google/cel-go v0.22.0 h1:b3FJZxpiv1vTMo2/5RDUqAHPxkT8mmMfJIrq1llbf7g=
-github.com/google/cel-go v0.22.0/go.mod h1:BuznPXXfQDpXKWQ9sPW3TzlAJN5zzFe+i9tIs0yC4s8=
-github.com/google/gnostic-models v0.6.9-0.20230804172637-c7be7c783f49 h1:0VpGH+cDhbDtdcweoyCVsF3fhN8kejK6rFe/2FFX2nU=
-github.com/google/gnostic-models v0.6.9-0.20230804172637-c7be7c783f49/go.mod h1:BkkQ4L1KS1xMt2aWSPStnn55ChGC0DPOn2FQYj+f25M=
+github.com/google/cel-go v0.23.2 h1:UdEe3CvQh3Nv+E/j9r1Y//WO0K0cSyD7/y0bzyLIMI4=
+github.com/google/cel-go v0.23.2/go.mod h1:52Pb6QsDbC5kvgxvZhiL9QX1oZEkcUF/ZqaPx1J5Wwo=
+github.com/google/gnostic-models v0.6.9 h1:MU/8wDLif2qCXZmzncUQ/BOfxWfthHi63KqpoNbWqVw=
+github.com/google/gnostic-models v0.6.9/go.mod h1:CiWsm0s6BSQd1hRn8/QmxqB6BesYcbSZxsz9b0KuDBw=
 github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
 github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
 github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
@@ -290,30 +303,25 @@ github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
 github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
 github.com/google/goterm v0.0.0-20190703233501-fc88cf888a3f h1:5CjVwnuUcp5adK4gmY6i72gpVFVnZDP2h5TmPScB6u4=
 github.com/google/goterm v0.0.0-20190703233501-fc88cf888a3f/go.mod h1:nOFQdrUlIlx6M6ODdSpBj1NVA+VgLC6kmw60mkw34H4=
-github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad h1:a6HEuzUHeKH6hwfN/ZoQgRgVIWFJljSWa/zetS2WTvg=
-github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144=
-github.com/google/safetext v0.0.0-20220905092116-b49f7bc46da2 h1:SJ+NtwL6QaZ21U+IrK7d0gGgpjGGvd2kz+FzTHVzdqI=
-github.com/google/safetext v0.0.0-20220905092116-b49f7bc46da2/go.mod h1:Tv1PlzqC9t8wNnpPdctvtSUOPUUg4SHeE6vR1Ir2hmg=
+github.com/google/pprof v0.0.0-20250403155104-27863c87afa6 h1:BHT72Gu3keYf3ZEu2J0b1vyeLSOYI8bm5wbJM/8yDe8=
+github.com/google/pprof v0.0.0-20250403155104-27863c87afa6/go.mod h1:boTsfXsheKC2y+lKOCMpSfarhxDeIzfZG1jqGcPl3cA=
+github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
 github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4=
 github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ=
 github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
 github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
 github.com/gorilla/css v1.0.1 h1:ntNaBIghp6JmvWnxbZKANoLyuXTPZ4cAMlo6RyhlbO8=
 github.com/gorilla/css v1.0.1/go.mod h1:BvnYkspnSzMmwRK+b8/xgNPLiIuNZr6vbZBTPQ2A3b0=
-github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg=
-github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
-github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw=
-github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y=
-github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho=
-github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
-github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo=
-github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 h1:bkypFPDjIYGfCYD5mRBvpqxfYX1YCS1PXdKYWi8FsN0=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0/go.mod h1:P+Lt/0by1T8bfcF3z737NnSbmxQAppXMRziHUxPOC8k=
+github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 h1:JeSE6pjso5THxAzdVpqr6/geYxZytqFMBCOtn/ujyeo=
+github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674/go.mod h1:r4w70xmWCQKmi1ONH4KIaBptdivuRPyosB9RmPlGEwA=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0 h1:TmHmbvxPmaegwhDubVz0lICL0J5Ka2vwTzhoePEXsGE=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0/go.mod h1:qztMSjm835F2bXf+5HKAPIS5qsmQDqZna/PgVt4rWtI=
 github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c h1:6rhixN/i8ZofjG1Y75iExal34USq5p+wiN1tpie8IrU=
 github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c/go.mod h1:NMPJylDgVpX0MLRlPy15sqSwOFv/U1GZ2m21JhFfek0=
 github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek=
 github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
+github.com/hinshun/vt10x v0.0.0-20180616224451-1954e6464174 h1:WlZsjVhE8Af9IcZDGgJGQpNflI3+MJSBhsgT5PCtzBQ=
+github.com/hinshun/vt10x v0.0.0-20180616224451-1954e6464174/go.mod h1:DqJ97dSdRW1W22yXSB90986pcOyQ7r45iio1KN2ez1A=
 github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
 github.com/huandu/xstrings v1.5.0 h1:2ag3IFq9ZDANvthTwTiqSSZLjDc+BedvHPAp5tJy2TI=
 github.com/huandu/xstrings v1.5.0/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE=
@@ -326,28 +334,57 @@ github.com/itchyny/gojq v0.12.9 h1:biKpbKwMxVYhCU1d6mR7qMr3f0Hn9F5k5YykCVb3gmM=
 github.com/itchyny/gojq v0.12.9/go.mod h1:T4Ip7AETUXeGpD+436m+UEl3m3tokRgajd5pRfsR5oE=
 github.com/itchyny/timefmt-go v0.1.4 h1:hFEfWVdwsEi+CY8xY2FtgWHGQaBaC3JeHd+cve0ynVM=
 github.com/itchyny/timefmt-go v0.1.4/go.mod h1:nEP7L+2YmAbT2kZ2HfSs1d8Xtw9LY8D2stDBckWakZ8=
+github.com/jackc/chunkreader v1.0.0/go.mod h1:RT6O25fNZIuasFJRyZ4R/Y2BbhasbmZXF9QQ7T3kePo=
+github.com/jackc/chunkreader/v2 v2.0.0/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk=
 github.com/jackc/chunkreader/v2 v2.0.1 h1:i+RDz65UE+mmpjTfyz0MoVTnzeYxroil2G82ki7MGG8=
 github.com/jackc/chunkreader/v2 v2.0.1/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk=
+github.com/jackc/pgconn v0.0.0-20190420214824-7e0022ef6ba3/go.mod h1:jkELnwuX+w9qN5YIfX0fl88Ehu4XC3keFuOJJk9pcnA=
+github.com/jackc/pgconn v0.0.0-20190824142844-760dd75542eb/go.mod h1:lLjNuW/+OfW9/pnVKPazfWOgNfH2aPem8YQ7ilXGvJE=
+github.com/jackc/pgconn v0.0.0-20190831204454-2fabfa3c18b7/go.mod h1:ZJKsE/KZfsUgOEh9hBm+xYTstcNHg7UPMVJqRfQxq4s=
+github.com/jackc/pgconn v1.8.0/go.mod h1:1C2Pb36bGIP9QHGBYCjnyhqu7Rv3sGshaQUvmfGIB/o=
+github.com/jackc/pgconn v1.9.0/go.mod h1:YctiPyvzfU11JFxoXokUOOKQXQmDMoJL9vJzHH8/2JY=
+github.com/jackc/pgconn v1.9.1-0.20210724152538-d89c8390a530/go.mod h1:4z2w8XhRbP1hYxkpTuBjTS3ne3J48K83+u0zoyvg2pI=
 github.com/jackc/pgconn v1.14.3 h1:bVoTr12EGANZz66nZPkMInAV/KHD2TxH9npjXXgiB3w=
 github.com/jackc/pgconn v1.14.3/go.mod h1:RZbme4uasqzybK2RK5c65VsHxoyaml09lx3tXOcO/VM=
 github.com/jackc/pgio v1.0.0 h1:g12B9UwVnzGhueNavwioyEEpAmqMe1E/BN9ES+8ovkE=
 github.com/jackc/pgio v1.0.0/go.mod h1:oP+2QK2wFfUWgr+gxjoBH9KGBb31Eio69xUb0w5bYf8=
+github.com/jackc/pgmock v0.0.0-20190831213851-13a1b77aafa2/go.mod h1:fGZlG77KXmcq05nJLRkk0+p82V8B8Dw8KN2/V9c/OAE=
+github.com/jackc/pgmock v0.0.0-20201204152224-4fe30f7445fd/go.mod h1:hrBW0Enj2AZTNpt/7Y5rr2xe/9Mn757Wtb2xeBzPv2c=
+github.com/jackc/pgmock v0.0.0-20210724152146-4ad1a8207f65 h1:DadwsjnMwFjfWc9y5Wi/+Zz7xoE5ALHsRQlOctkOiHc=
+github.com/jackc/pgmock v0.0.0-20210724152146-4ad1a8207f65/go.mod h1:5R2h2EEX+qri8jOWMbJCtaPWkrrNc7OHwsp2TCqp7ak=
 github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM=
 github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg=
+github.com/jackc/pgproto3 v1.1.0/go.mod h1:eR5FA3leWg7p9aeAqi37XOTgTIbkABlvcPB3E5rlc78=
+github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190420180111-c116219b62db/go.mod h1:bhq50y+xrl9n5mRYyCBFKkpRVTLYJVWeCc+mEAI3yXA=
+github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190609003834-432c2951c711/go.mod h1:uH0AWtUmuShn0bcesswc4aBTWGvw0cAxIJp+6OB//Wg=
+github.com/jackc/pgproto3/v2 v2.0.0-rc3/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM=
+github.com/jackc/pgproto3/v2 v2.0.0-rc3.0.20190831210041-4c03ce451f29/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM=
+github.com/jackc/pgproto3/v2 v2.0.6/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA=
+github.com/jackc/pgproto3/v2 v2.1.1/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA=
 github.com/jackc/pgproto3/v2 v2.3.3 h1:1HLSx5H+tXR9pW3in3zaztoEwQYRC9SQaYUHjTSUOag=
 github.com/jackc/pgproto3/v2 v2.3.3/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA=
+github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b/go.mod h1:vsD4gTJCa9TptPL8sPkXrLZ+hDuNrZCnj29CQpr4X1E=
 github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a h1:bbPeKD0xmW/Y25WS6cokEszi5g+S0QxI/d45PkRi7Nk=
 github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM=
+github.com/jackc/pgtype v0.0.0-20190421001408-4ed0de4755e0/go.mod h1:hdSHsc1V01CGwFsrv11mJRHWJ6aifDLfdV3aVjFF0zg=
+github.com/jackc/pgtype v0.0.0-20190824184912-ab885b375b90/go.mod h1:KcahbBH1nCMSo2DXpzsoWOAfFkdEtEJpPbVLq8eE+mc=
+github.com/jackc/pgtype v0.0.0-20190828014616-a8802b16cc59/go.mod h1:MWlu30kVJrUS8lot6TQqcg7mtthZ9T0EoIBFiJcmcyw=
+github.com/jackc/pgtype v1.8.1-0.20210724151600-32e20a603178/go.mod h1:C516IlIV9NKqfsMCXTdChteoXmwgUceqaLfjg2e3NlM=
 github.com/jackc/pgtype v1.14.0 h1:y+xUdabmyMkJLyApYuPj38mW+aAIqCe5uuBB51rH3Vw=
 github.com/jackc/pgtype v1.14.0/go.mod h1:LUMuVrfsFfdKGLw+AFFVv6KtHOFMwRgDDzBt76IqCA4=
+github.com/jackc/pgx/v4 v4.0.0-20190420224344-cc3461e65d96/go.mod h1:mdxmSJJuR08CZQyj1PVQBHy9XOp5p8/SHH6a0psbY9Y=
+github.com/jackc/pgx/v4 v4.0.0-20190421002000-1b8f0016e912/go.mod h1:no/Y67Jkk/9WuGR0JG/JseM9irFbnEPbuWV2EELPNuM=
+github.com/jackc/pgx/v4 v4.0.0-pre1.0.20190824185557-6972a5742186/go.mod h1:X+GQnOEnf1dqHGpw7JmHqHc1NxDoalibchSk9/RWuDc=
+github.com/jackc/pgx/v4 v4.12.1-0.20210724153913-640aa07df17c/go.mod h1:1QD0+tgSXP7iUjYm9C1NxKhny7lq6ee99u/z+IHFcgs=
 github.com/jackc/pgx/v4 v4.18.3 h1:dE2/TrEsGX3RBprb3qryqSV9Y60iZN1C6i8IrmW9/BA=
 github.com/jackc/pgx/v4 v4.18.3/go.mod h1:Ey4Oru5tH5sB6tV7hDmfWFahwF15Eb7DNXlRKx2CkVw=
+github.com/jackc/puddle v0.0.0-20190413234325-e4ced69a3a2b/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
+github.com/jackc/puddle v0.0.0-20190608224051-11cab39313c9/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
+github.com/jackc/puddle v1.1.3/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk=
 github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
 github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
 github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8=
 github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
-github.com/jonboulle/clockwork v0.4.0 h1:p4Cf1aMWXnXAUh8lVfewRBx1zaTSYKrKMF2g3ST4RZ4=
-github.com/jonboulle/clockwork v0.4.0/go.mod h1:xgRqUGwRcjKCO1vbZUEtSLrqKoPSsUpK7fnezOII0kc=
 github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
 github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
 github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
@@ -356,32 +393,53 @@ github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNU
 github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8=
 github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
 github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
+github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
+github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
+github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
 github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
 github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
 github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
 github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
 github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
+github.com/kr/pty v1.1.4/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
+github.com/kr/pty v1.1.8 h1:AkaSdXYQOWeaO3neb8EM634ahkXXe3jYbVh/F9lq+GI=
+github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw=
 github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
 github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
 github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
+github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
+github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
+github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
+github.com/lib/pq v1.1.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
+github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
+github.com/lib/pq v1.10.2/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
+github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw=
+github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
 github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de h1:9TO3cAIGXtEhnIaL+V+BEER86oLrvS+kWobKpbJuye0=
 github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE=
 github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
 github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
+github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ=
 github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
+github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
 github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
 github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
+github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
+github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
 github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
+github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
 github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
 github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
 github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
 github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
 github.com/mattn/go-runewidth v0.0.14 h1:+xnbZSEeDbOIg5/mE6JF0w6n9duR1l3/WmbinWVwUuU=
 github.com/mattn/go-runewidth v0.0.14/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
+github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE=
 github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d h1:5PJl274Y63IEHC+7izoQE9x6ikvDFZS2mDVS3drnohI=
 github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE=
-github.com/microcosm-cc/bluemonday v1.0.26 h1:xbqSvqzQMeEHCqMi64VAs4d8uy6Mequs3rQ0k/Khz58=
-github.com/microcosm-cc/bluemonday v1.0.26/go.mod h1:JyzOCs9gkyQyjs+6h10UEVSe02CGwkhd72Xdqh78TWs=
+github.com/microcosm-cc/bluemonday v1.0.27 h1:MpEUotklkwCSLeH+Qdx1VJgNqLlpY2KXwXFM08ygZfk=
+github.com/microcosm-cc/bluemonday v1.0.27/go.mod h1:jFi9vgW+H7c3V0lb6nR74Ib/DIB5OBs92Dimizgw2cA=
 github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw=
 github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s=
 github.com/mitchellh/go-wordwrap v1.0.1 h1:TLuKupo69TCn6TQSyGxwI1EblZZEsQ0vMlAFQflz0v0=
@@ -392,6 +450,10 @@ github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3N
 github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo=
 github.com/moby/spdystream v0.5.0 h1:7r0J1Si3QO/kjRitvSLVVFUjxMEb/YLj6S9FF62JBCU=
 github.com/moby/spdystream v0.5.0/go.mod h1:xBAYlnt/ay+11ShkdFKNAG7LsyK/tmNBVvVOwrfMgdI=
+github.com/moby/sys/atomicwriter v0.1.0 h1:kw5D/EqkBwsBFi0ss9v1VG3wIkVhzGvLklJ+w3A14Sw=
+github.com/moby/sys/atomicwriter v0.1.0/go.mod h1:Ul8oqv2ZMNHOceF643P6FKPXeCmYtlQMvpizfsSoaWs=
+github.com/moby/sys/sequential v0.6.0 h1:qrx7XFUd/5DxtqcoH1h438hF5TmOvzC/lspjy7zgvCU=
+github.com/moby/sys/sequential v0.6.0/go.mod h1:uyv8EUTrca5PnDsdMGXhZe6CCe8U/UiTWd+lL+7b/Ko=
 github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0=
 github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y=
 github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
@@ -418,48 +480,59 @@ github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108
 github.com/onsi/ginkgo v1.16.2/go.mod h1:CObGmKUOKaSC0RjmoAK7tKyn4Azo5P2IWuoMnvwxz1E=
 github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
 github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU=
-github.com/onsi/ginkgo/v2 v2.23.3 h1:edHxnszytJ4lD9D5Jjc4tiDkPBZ3siDeJJkUZJJVkp0=
-github.com/onsi/ginkgo/v2 v2.23.3/go.mod h1:zXTP6xIp3U8aVuXN8ENK9IXRaTjFnpVB9mGmaSRvxnM=
+github.com/onsi/ginkgo/v2 v2.23.4 h1:ktYTpKJAVZnDT4VjxSbiBenUjmlL/5QkBEocaWXiQus=
+github.com/onsi/ginkgo/v2 v2.23.4/go.mod h1:Bt66ApGPBFzHyR+JO10Zbt0Gsp4uWxu5mIOTusL46e8=
 github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
 github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
 github.com/onsi/gomega v1.12.0/go.mod h1:lRk9szgn8TxENtWd0Tp4c3wjlRfMTMH27I+3Je41yGY=
-github.com/onsi/gomega v1.36.3 h1:hID7cr8t3Wp26+cYnfcjR6HpJ00fdogN6dqZ1t6IylU=
-github.com/onsi/gomega v1.36.3/go.mod h1:8D9+Txp43QWKhM24yyOBEdpkzN8FvJyAwecBgsU4KU0=
+github.com/onsi/gomega v1.38.0 h1:c/WX+w8SLAinvuKKQFh77WEucCnPk4j2OTUr7lt7BeY=
+github.com/onsi/gomega v1.38.0/go.mod h1:OcXcwId0b9QsE7Y49u+BTrL4IdKOBOKnD6VQNTJEB6o=
 github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.1.0-rc5 h1:Ygwkfw9bpDvs+c9E34SdgGOj41dX/cbdlwvlWt0pnFI= github.com/opencontainers/image-spec v1.1.0-rc5/go.mod h1:X4pATf0uXsnn3g5aiGIsVnJBR4mxhKzfwmvK/B2NTm8= -github.com/openshift-online/ocm-common v0.0.29 h1:EyKoLvQXKOa3UpoWHT3cMyNHBbhSZURC8Ws/cxTaT1U= -github.com/openshift-online/ocm-common v0.0.29/go.mod h1:VEkuZp9aqbXtetZ5ycND6QpvhykvTuBF3oPsVM1X3vI= -github.com/openshift-online/ocm-sdk-go v0.1.465 h1:RZr92sdcAKyLVcL19/RYOn6KVtspDUH1wc3UuO4LgiE= -github.com/openshift-online/ocm-sdk-go v0.1.465/go.mod h1:EOkylgH0bafd+SlU9YvMrIIxHJw0Hk1EnC7W1VZeW8I= -github.com/openshift/rosa v1.2.55 h1:Y6UD1474aExF4bZSh2KH4zE+Xl2NVsiuj3TLQGT9U+Y= -github.com/openshift/rosa v1.2.55/go.mod h1:EE0yTEjbwxfnH/9YbQZaUXUVbIzfPa9KCRNw19QdLsw= +github.com/openshift-online/ocm-api-model/clientapi v0.0.431 h1:oGyJaX7ERZZVqVQBQD2OtmUcArsWl+97tXMcLTerbPo= +github.com/openshift-online/ocm-api-model/clientapi v0.0.431/go.mod h1:fZwy5HY2URG9nrExvQeXrDU/08TGqZ16f8oymVEN5lo= +github.com/openshift-online/ocm-api-model/model v0.0.431 h1:vIuELb0uH2AkN5LMQLYbLrdYIULMEK6ctQkbdoNbEZQ= +github.com/openshift-online/ocm-api-model/model v0.0.431/go.mod h1:PQIoq6P8Vlb7goOdRMLK8nJY+B7HH0RTqYAa4kyidTE= +github.com/openshift-online/ocm-common v0.0.31 h1:csxB4UQAUhwhDOVBmOzUKgtemuwV9rhCkzMoeFX8zCQ= +github.com/openshift-online/ocm-common v0.0.31/go.mod h1:VEkuZp9aqbXtetZ5ycND6QpvhykvTuBF3oPsVM1X3vI= +github.com/openshift-online/ocm-sdk-go v0.1.476 h1:l5gp/QEqnocqM02m7pDeS9ndXcCTBamewVSGaymd88Y= +github.com/openshift-online/ocm-sdk-go v0.1.476/go.mod h1:ds+aOAlQbiK0ubZP3CwXkzd7m48v6fMQ1ef9UCrjzBY= +github.com/openshift/rosa v1.2.57 h1:f2nZCEdQs0a1jadLvgM9Za36ilJq6z+IhteJNnNcTlc= +github.com/openshift/rosa v1.2.57/go.mod h1:kb6iV145TXhUWBONqlflNIYNbrcYGLk/SFZD6vNx4wM= github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8= github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= github.com/pelletier/go-toml/v2 v2.2.3 h1:YmeHyLY8mFWbdkNWwpr+qIL2bEqT0o95WSdkNHvL12M= github.com/pelletier/go-toml/v2 v2.2.3/go.mod h1:MfCQTFTvCcUyyvvwm1+G6H/jORL20Xlb6rzQu9GuUkc= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE= -github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho= +github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g= +github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U= +github.com/prometheus/client_golang v1.23.0 h1:ust4zpdl9r4trLY/gSjlm07PuiBq2ynaXXlptpfy8Uc= +github.com/prometheus/client_golang v1.23.0/go.mod h1:i/o0R9ByOnHX0McrTMTyhYvKE4haaf2mW08I+jGAjEE= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.6.1 
h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= -github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= -github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc= -github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8= -github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= -github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= +github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk= +github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE= +github.com/prometheus/common v0.65.0 h1:QDwzd+G1twt//Kwj/Ww6E9FQq1iVMmODnILtW1t2VzE= +github.com/prometheus/common v0.65.0/go.mod h1:0gZns+BLRQ3V6NdaerOhMbwwRbNh9hkGINtQAsP5GS8= +github.com/prometheus/procfs v0.17.0 h1:FuLQ+05u4ZI+SS/w9+BWEM2TXiHKsUQ9TADiRH7DuK0= +github.com/prometheus/procfs v0.17.0/go.mod h1:oPQLaDAMRbA+u8H5Pbfq+dl3VDAvHxMUOVhe0wYB2zw= github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= github.com/rivo/uniseg v0.4.2 h1:YwD0ulJSJytLpiaWua0sBDusfsCZohxjxzVTYjwxfV8= github.com/rivo/uniseg v0.4.2/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs= github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro= -github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= -github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= +github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= +github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= +github.com/rs/zerolog v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU= +github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThCjNc= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/sagikazarmark/locafero v0.7.0 h1:5MqpDsTGNDhY8sGp0Aowyf0qKsPrhewaLSsFaodPcyo= @@ -468,17 +541,20 @@ github.com/sanathkr/go-yaml v0.0.0-20170819195128-ed9d249f429b h1:jUK33OXuZP/l6b github.com/sanathkr/go-yaml v0.0.0-20170819195128-ed9d249f429b/go.mod h1:8458kAagoME2+LN5//WxE71ysZ3B7r22fdgb7qVmXSY= github.com/sanathkr/yaml v0.0.0-20170819201035-0056894fa522 h1:fOCp11H0yuyAt2wqlbJtbyPzSgaxHTv8uN1pMpkG1t8= github.com/sanathkr/yaml v0.0.0-20170819201035-0056894fa522/go.mod h1:tQTYKOQgxoH3v6dEmdHiz4JG+nbxWwM5fgPQUpSZqVQ= +github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= github.com/sergi/go-diff v1.3.1 h1:xkr+Oxo4BOQKmkn/B9eMK0g5Kg/983T9DqqPHwYqD+8= github.com/sergi/go-diff v1.3.1/go.mod h1:aMJSSKb2lpPvRNec0+w3fl7LP9IOFzdc9Pa4NFbPK1I= +github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4= +github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= github.com/shopspring/decimal v1.4.0 h1:bxl37RwXBklmTi0C79JfXCEBD1cqqHt0bbgBAGFp81k= github.com/shopspring/decimal v1.4.0/go.mod h1:gawqmDU56v4yIKSwfBSFip1HdCCXN8/+DMd9qYNcwME= 
+github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
+github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
 github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
 github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
 github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
 github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 h1:JIAuq3EEf9cgbU6AtGPK4CTG3Zf6CKMNqf0MHTggAUA=
 github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966/go.mod h1:sUM3LWHvSMaG192sy56D9F7CNvL7jUJVXoqM1QKLnog=
-github.com/soheilhy/cmux v0.1.5 h1:jjzc5WVemNEDTLwv9tlmemhC73tI08BNOIGwBOo10Js=
-github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0=
 github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo=
 github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0=
 github.com/spf13/afero v1.12.0 h1:UcOPyRBYczmFn6yvphxkn9ZEOY65cpwGKb5mL36mrqs=
@@ -487,16 +563,21 @@ github.com/spf13/cast v1.7.1 h1:cuNEagBQEHWN1FnbGEjCXL2szYEXqfJPbP2HNUaca9Y=
 github.com/spf13/cast v1.7.1/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo=
 github.com/spf13/cobra v1.9.1 h1:CXSaggrXdbHK9CF+8ywj8Amf7PBRmPCOJugH954Nnlo=
 github.com/spf13/cobra v1.9.1/go.mod h1:nDyEzZ8ogv936Cinf6g1RU9MRY64Ir93oCnqb9wxYW0=
-github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o=
 github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
-github.com/spf13/viper v1.20.0 h1:zrxIyR3RQIOsarIrgL8+sAvALXul9jeEPa06Y0Ph6vY=
-github.com/spf13/viper v1.20.0/go.mod h1:P9Mdzt1zoHIG8m2eZQinpiBjo6kCmZSKBClNNqjJvu4=
+github.com/spf13/pflag v1.0.10 h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk=
+github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
+github.com/spf13/viper v1.20.1 h1:ZMi+z/lvLyPSCoNtFCpqjy0S4kPbirhpTMwl8BkW9X4=
+github.com/spf13/viper v1.20.1/go.mod h1:P9Mdzt1zoHIG8m2eZQinpiBjo6kCmZSKBClNNqjJvu4=
 github.com/stoewer/go-strcase v1.3.0 h1:g0eASXYtp+yvN9fK8sH94oCIk0fau9uV1/ZdJ0AVEzs=
 github.com/stoewer/go-strcase v1.3.0/go.mod h1:fAH5hQ5pehh+j3nZfvwdk2RgEgQjAoM8wodgtPmh1xo=
 github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
 github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
-github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c=
 github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
+github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=
+github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
+github.com/stretchr/testify v1.2.1/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
 github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
 github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
 github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
@@ -506,14 +587,10 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/
 github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
 github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
 github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
-github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
-github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
+github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
+github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
 github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8=
 github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU=
-github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75 h1:6fotK7otjonDflCTK0BCfls4SPy3NcCVb5dqqmbRknE=
-github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75/go.mod h1:KO6IkyS8Y3j8OdNO85qEYBsRPuteD+YciPomcXdrMnk=
-github.com/valyala/fastjson v1.6.4 h1:uAUNq9Z6ymTgGhcm0UynUAB6tlbakBrz6CQFax3BXVQ=
-github.com/valyala/fastjson v1.6.4/go.mod h1:CLCAqky6SMuOcxStkYQvblddUtoRxhYMGLrsQns1aXY=
 github.com/vincent-petithory/dataurl v1.0.0 h1:cXw+kPto8NLuJtlMsI152irrVw9fRDX8AbShPRpg2CI=
 github.com/vincent-petithory/dataurl v1.0.0/go.mod h1:FHafX5vmDzyP+1CQATJn7WFKc9CvnvxyvZy6I1MrG/U=
 github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
@@ -521,75 +598,87 @@ github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcY
 github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
 github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ=
 github.com/xeipuuv/gojsonschema v0.0.0-20181112162635-ac52e6811b56/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs=
-github.com/xiang90/probing v0.0.0-20221125231312-a49e3df8f510 h1:S2dVYn90KE98chqDkyE9Z4N61UnQd+KOfgp5Iu53llk=
-github.com/xiang90/probing v0.0.0-20221125231312-a49e3df8f510/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
 github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
 github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
 github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
 github.com/zalando/go-keyring v0.2.3 h1:v9CUu9phlABObO4LPWycf+zwMG7nlbb3t/B5wa97yms=
 github.com/zalando/go-keyring v0.2.3/go.mod h1:HL4k+OXQfJUWaMnqyuSOc0drfGPX2b51Du6K+MRgZMk=
+github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q=
 github.com/zgalor/weberr v0.8.2 h1:rzGP0jQVt8hGSNnzjDAQNHMxNNrf3gUrYhpSgY76+mk=
 github.com/zgalor/weberr v0.8.2/go.mod h1:cqK89mj84q3PRgqQXQFWJDzCorOd8xOtov/ulOnqDwc=
 github.com/ziutek/telnet v0.0.0-20180329124119-c3b780dc415b/go.mod h1:IZpXDfkJ6tWD3PhBK5YzgQT+xJWh7OsdwiG8hA2MkO4=
 gitlab.com/c0b/go-ordered-json v0.0.0-20201030195603-febf46534d5a h1:DxppxFKRqJ8WD6oJ3+ZXKDY0iMONQDl5UTg2aTyHh8k=
 gitlab.com/c0b/go-ordered-json v0.0.0-20201030195603-febf46534d5a/go.mod h1:NREvu3a57BaK0R1+ztrEzHWiZAihohNLQ6trPxlIqZI=
-go.etcd.io/bbolt v1.3.11 h1:yGEzV1wPz2yVCLsD8ZAiGHhHVlczyC9d1rP43/VCRJ0=
-go.etcd.io/bbolt v1.3.11/go.mod h1:dksAq7YMXoljX0xu6VF5DMZGbhYYoLUalEiSySYAS4I=
-go.etcd.io/etcd/api/v3 v3.5.20 h1:aKfz3nPZECWoZJXMSH9y6h2adXjtOHaHTGEVCuCmaz0=
-go.etcd.io/etcd/api/v3 v3.5.20/go.mod h1:QqKGViq4KTgOG43dr/uH0vmGWIaoJY3ggFi6ZH0TH/U=
-go.etcd.io/etcd/client/pkg/v3 v3.5.20 h1:sZIAtra+xCo56gdf6BR62to/hiie5Bwl7hQIqMzVTEM=
-go.etcd.io/etcd/client/pkg/v3 v3.5.20/go.mod h1:qaOi1k4ZA9lVLejXNvyPABrVEe7VymMF2433yyRQ7O0=
-go.etcd.io/etcd/client/v2 v2.305.16 h1:kQrn9o5czVNaukf2A2At43cE9ZtWauOtf9vRZuiKXow=
-go.etcd.io/etcd/client/v2 v2.305.16/go.mod h1:h9YxWCzcdvZENbfzBTFCnoNumr2ax3F19sKMqHFmXHE=
-go.etcd.io/etcd/client/v3 v3.5.20 h1:jMT2MwQEhyvhQg49Cec+1ZHJzfUf6ZgcmV0GjPv0tIQ=
-go.etcd.io/etcd/client/v3 v3.5.20/go.mod h1:J5lbzYRMUR20YolS5UjlqqMcu3/wdEvG5VNBhzyo3m0=
-go.etcd.io/etcd/pkg/v3 v3.5.16 h1:cnavs5WSPWeK4TYwPYfmcr3Joz9BH+TZ6qoUtz6/+mc=
-go.etcd.io/etcd/pkg/v3 v3.5.16/go.mod h1:+lutCZHG5MBBFI/U4eYT5yL7sJfnexsoM20Y0t2uNuY=
-go.etcd.io/etcd/raft/v3 v3.5.16 h1:zBXA3ZUpYs1AwiLGPafYAKKl/CORn/uaxYDwlNwndAk=
-go.etcd.io/etcd/raft/v3 v3.5.16/go.mod h1:P4UP14AxofMJ/54boWilabqqWoW9eLodl6I5GdGzazI=
-go.etcd.io/etcd/server/v3 v3.5.16 h1:d0/SAdJ3vVsZvF8IFVb1k8zqMZ+heGcNfft71ul9GWE=
-go.etcd.io/etcd/server/v3 v3.5.16/go.mod h1:ynhyZZpdDp1Gq49jkUg5mfkDWZwXnn3eIqCqtJnrD/s=
-go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0 h1:r6I7RJCN86bpD/FQwedZ0vSixDpwuWREjW9oRMsmqDc=
-go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0/go.mod h1:B9yO6b04uB80CzjedvewuqDhxJxi11s7/GtiGa8bAjI=
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 h1:TT4fX+nBOA/+LUkobKGW1ydGcn+G3vRw9+g5HwCphpk=
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0/go.mod h1:L7UH0GbB0p47T4Rri3uHjbpCFYrVrwc1I25QhNPiGK8=
-go.opentelemetry.io/otel v1.29.0 h1:PdomN/Al4q/lN6iBJEN3AwPvUiHPMlt93c8bqTG5Llw=
-go.opentelemetry.io/otel v1.29.0/go.mod h1:N/WtXPs1CNCUEx+Agz5uouwCba+i+bJGFicT8SR4NP8=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 h1:3Q/xZUyC1BBkualc9ROb4G8qkH90LXEIICcs5zv1OYY=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0/go.mod h1:s75jGIWA9OfCMzF0xr+ZgfrB5FEbbV7UuYo32ahUiFI=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0 h1:qFffATk0X+HD+f1Z8lswGiOQYKHRlzfmdJm0wEaVrFA=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0/go.mod h1:MOiCmryaYtc+V0Ei+Tx9o5S1ZjA7kzLucuVuyzBZloQ=
+go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
+go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 h1:F7Jx+6hwnZ41NSFTO5q4LYDtJRXBf2PD0rNBkeB/lus=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0/go.mod h1:UHB22Z8QsdRDrnAtX4PntOl36ajSxcdUMt1sF7Y6E7Q=
+go.opentelemetry.io/otel v1.37.0 h1:9zhNfelUvx0KBfu/gb+ZgeAfAgtWrfHJZcAqFC228wQ=
+go.opentelemetry.io/otel v1.37.0/go.mod h1:ehE/umFRLnuLa/vSccNq9oS1ErUlkkK71gMcN34UG8I=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0 h1:Vh5HayB/0HHfOQA7Ctx69E/Y/DcQSMPpKANYVMQ7fBA=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0/go.mod h1:cpgtDBaqD/6ok/UG0jT15/uKjAY8mRA53diogHBg3UI=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.33.0 h1:5pojmb1U1AogINhN3SurB+zm/nIcusopeBNp42f45QM=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.33.0/go.mod h1:57gTHJSE5S1tqg+EKsLPlTWhpHMsWlVmer+LA926XiA=
 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.22.0 h1:FyjCyI9jVEfqhUh2MoSkmolPjfh5fp2hnV0b0irxH4Q=
 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.22.0/go.mod h1:hYwym2nDEeZfG/motx0p7L7J1N1vyzIThemQsb4g2qY=
-go.opentelemetry.io/otel/metric v1.29.0 h1:vPf/HFWTNkPu1aYeIsc98l4ktOQaL6LeSoeV2g+8YLc=
-go.opentelemetry.io/otel/metric v1.29.0/go.mod h1:auu/QWieFVWx+DmQOUMgj0F8LHWdgalxXqvp7BII/W8=
-go.opentelemetry.io/otel/sdk v1.29.0 h1:vkqKjk7gwhS8VaWb0POZKmIEDimRCMsopNYnriHyryo=
-go.opentelemetry.io/otel/sdk v1.29.0/go.mod h1:pM8Dx5WKnvxLCb+8lG1PRNIDxu9g9b9g59Qr7hfAAok=
-go.opentelemetry.io/otel/trace v1.29.0 h1:J/8ZNK4XgR7a21DZUAsbF8pZ5Jcw1VhACmnYt39JTi4=
-go.opentelemetry.io/otel/trace v1.29.0/go.mod h1:eHl3w0sp3paPkYstJOmAimxhiFXPg+MMTlEh3nsQgWQ=
-go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0=
-go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8=
+go.opentelemetry.io/otel/metric v1.37.0 h1:mvwbQS5m0tbmqML4NqK+e3aDiO02vsf/WgbsdpcPoZE=
+go.opentelemetry.io/otel/metric v1.37.0/go.mod h1:04wGrZurHYKOc+RKeye86GwKiTb9FKm1WHtO+4EVr2E=
+go.opentelemetry.io/otel/sdk v1.37.0 h1:ItB0QUqnjesGRvNcmAcU0LyvkVyGJ2xftD29bWdDvKI=
+go.opentelemetry.io/otel/sdk v1.37.0/go.mod h1:VredYzxUvuo2q3WRcDnKDjbdvmO0sCzOvVAiY+yUkAg=
+go.opentelemetry.io/otel/sdk/metric v1.37.0 h1:90lI228XrB9jCMuSdA0673aubgRobVZFhbjxHHspCPc=
+go.opentelemetry.io/otel/sdk/metric v1.37.0/go.mod h1:cNen4ZWfiD37l5NhS+Keb5RXVWZWpRE+9WyVCpbo5ps=
+go.opentelemetry.io/otel/trace v1.37.0 h1:HLdcFNbRQBE2imdSEgm/kwqmQj1Or1l/7bW6mxVK7z4=
+go.opentelemetry.io/otel/trace v1.37.0/go.mod h1:TlgrlQ+PtQO5XFerSPUYG0JSgGyryXewPGyayAWSBS0=
+go.opentelemetry.io/proto/otlp v1.4.0 h1:TA9WRvW6zMwP+Ssb6fLoUIuirti1gGbP28GcKG1jgeg=
+go.opentelemetry.io/proto/otlp v1.4.0/go.mod h1:PPBWZIP98o2ElSqI35IHfu7hIhSwvc5N38Jw8pXuGFY=
+go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
+go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
+go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
+go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
+go.uber.org/automaxprocs v1.6.0 h1:O3y2/QNTOdbF+e/dpXNNW7Rx2hZ4sTIPyybbxyNqTUs=
+go.uber.org/automaxprocs v1.6.0/go.mod h1:ifeIMSnPZuznNm6jmdzmU3/bfk01Fe2fotchwEFJ8r8=
 go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
 go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
 go.uber.org/mock v0.5.2 h1:LbtPTcP8A5k9WPXj54PPPbjcI4Y6lhyOZXn+VS7wNko=
 go.uber.org/mock v0.5.2/go.mod h1:wLlUxC2vVTPTaE3UD51E0BGOAElKrILxhVSDYQLld5o=
+go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
+go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4=
+go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU=
 go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
 go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
+go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA=
+go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
+go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
+go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM=
 go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
 go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
+go.yaml.in/yaml/v2 v2.4.2 h1:DzmwEr2rDGHl7lsFgAHxmNz/1NlQ7xLIrlN2h5d1eGI=
+go.yaml.in/yaml/v2 v2.4.2/go.mod h1:081UH+NErpNdqlCXm3TtEran0rJZGxAYx9hb/ELlsPU=
+go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc=
+go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg=
 golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20190411191339-88737f569e3a/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE=
+golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190530122614-20be4c3c3ed5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
 golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
 golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
 golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20201203163018-be400aefbc4c/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
+golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
+golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
 golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
-golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34=
-golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc=
+golang.org/x/crypto v0.45.0 h1:jMBrvKuj23MTlT0bQEOBcAE0mjg8mK9RXFhRH6nyF3Q=
+golang.org/x/crypto v0.45.0/go.mod h1:XTGrrkGJve7CYK7J8PEww4aY7gM3qMCElcJQ8n8JdX4=
 golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
 golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0LeHDbnYEryqj5Q1ug8=
 golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY=
 golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
 golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
 golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
+golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
 golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
@@ -600,34 +689,41 @@ golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73r
 golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
 golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
 golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
 golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
 golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
 golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
 golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
 golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
 golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk=
-golang.org/x/net v0.38.0 h1:vRMAPTMaeGqVhG5QyLJHqNDwecKTomGeqbnfZyKlBI8=
-golang.org/x/net v0.38.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8=
+golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY=
+golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU=
 golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
-golang.org/x/oauth2 v0.28.0 h1:CrgCKl8PPAVtLnU3c+EDw6x11699EWlsDeWNWKdIOkc=
-golang.org/x/oauth2 v0.28.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8=
+golang.org/x/oauth2 v0.32.0 h1:jsCblLleRMDrxMN29H3z/k1KliIvpLgCkE6R8FXXNgY=
+golang.org/x/oauth2 v0.32.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA=
 golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw=
-golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
+golang.org/x/sync v0.18.0 h1:kr88TuHDroi+UVf+0hZnirlk8o8T+4MrK6mr60WkH/I=
+golang.org/x/sync v0.18.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
 golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -642,52 +738,64 @@ golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBc
 golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik=
-golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
+golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc=
+golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
+golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
 golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
-golang.org/x/term v0.30.0 h1:PQ39fJZ+mfadBm0y5WlL4vlM7Sx1Hgf13sMIY2+QS9Y=
-golang.org/x/term v0.30.0/go.mod h1:NYYFdzHoI5wRh/h5tDMdMqCqPJZEuNqVR5xJLd/n67g=
+golang.org/x/term v0.0.0-20210503060354-a79de5458b56/go.mod h1:tfny5GFUkzUvx4ps4ajbZsCe5lw1metzhBm9T3x7oIY=
+golang.org/x/term v0.37.0 h1:8EGAD0qCmHYZg6J17DvsMy9/wJ7/D/4pV/wfnld5lTU=
+golang.org/x/term v0.37.0/go.mod h1:5pB4lxRNYYVZuTLmy8oR2BH8dflOR+IbTYFD8fi3254=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
 golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
 golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY=
-golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4=
-golang.org/x/time v0.8.0 h1:9i3RxcPv3PZnitoVGMPDKZSq1xW1gK1Xy3ArNOGZfEg=
-golang.org/x/time v0.8.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
+golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM=
+golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM=
+golang.org/x/time v0.13.0 h1:eUlYslOIt32DgYD6utsuUeHs4d7AsEYLuIAdg7FlYgI=
+golang.org/x/time v0.13.0/go.mod h1:eL/Oa2bBBK0TkX57Fyni+NgnyQQN4LitPmob2Hjnqw4=
 golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
 golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
 golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
 golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190425163242-31fd60d6bfdc/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
 golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190823170909-c4a336ef6a2f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
 golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
 golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
 golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
 golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
 golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
-golang.org/x/tools v0.30.0 h1:BgcpHewrV5AUp2G9MebG4XPFI1E2W41zU1SaqVA9vJY=
-golang.org/x/tools v0.30.0/go.mod h1:c347cR/OJfw5TI+GfX7RUPNMdDRRbjvYTS0jPyvsVtY=
+golang.org/x/tools v0.38.0 h1:Hx2Xv8hISq8Lm16jvBZ2VQf+RLmbd7wVUsALibYI/IQ=
+golang.org/x/tools v0.38.0/go.mod h1:yEsQ/d/YK8cjh0L6rZlY8tgtlKiBNTL14pGDJPJpYQs=
+golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 gomodules.xyz/jsonpatch/v2 v2.5.0 h1:JELs8RLM12qJGXU4u/TO3V25KW8GreMKl9pdkk14RM0=
 gomodules.xyz/jsonpatch/v2 v2.5.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY=
+gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk=
+gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E=
 google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
 google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
 google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
 google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
-google.golang.org/genproto v0.0.0-20241118233622-e639e219e697 h1:ToEetK57OidYuqD4Q5w+vfEnPvPpuTwedCNVohYJfNk=
-google.golang.org/genproto v0.0.0-20241118233622-e639e219e697/go.mod h1:JJrvXBWRZaFMxBufik1a4RpFw4HhgVtBBWQeQgUj2cc=
-google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576 h1:CkkIfIt50+lT6NHAVoRYEyAvQGFM7xEwXUUywFvEb3Q=
-google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576/go.mod h1:1R3kvZ1dtP3+4p4d3G8uJ8rFk/fWlScl38vanWACI08=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20241223144023-3abc09e42ca8 h1:TqExAhdPaB60Ux47Cn0oLV07rGnxZzIsaRhQaqS666A=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20241223144023-3abc09e42ca8/go.mod h1:lcTa1sDdWEIHMWlITnIczmw5w60CF9ffkb8Z+DVmmjA=
+google.golang.org/genproto/googleapis/api v0.0.0-20250818200422-3122310a409c h1:AtEkQdl5b6zsybXcbz00j1LwNodDuH6hVifIaNqk7NQ=
+google.golang.org/genproto/googleapis/api v0.0.0-20250818200422-3122310a409c/go.mod h1:ea2MjsO70ssTfCjiwHgI0ZFqcw45Ksuk2ckf9G468GA=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20251002232023-7c0ddcbb5797 h1:CirRxTOwnRWVLKzDNrs0CXAaVozJoR4G9xvdRecrdpk=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20251002232023-7c0ddcbb5797/go.mod h1:HSkG/KdJWusxU1F6CNrwNDjBMgisKxGnc5dAZfT0mjQ=
 google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
 google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
 google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
 google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
-google.golang.org/grpc v1.67.3 h1:OgPcDAFKHnH8X3O4WcO4XUc8GRDeKsKReqbQtiCj7N8=
-google.golang.org/grpc v1.67.3/go.mod h1:YGaHCc6Oap+FzBJTZLBzkGSYt/cvGPFTPxkn7QfSU8s=
+google.golang.org/grpc v1.75.1 h1:/ODCNEuf9VghjgO3rqLcfg8fiOP0nSluljWFlDxELLI=
+google.golang.org/grpc v1.75.1/go.mod h1:JtPAzKiq4v1xcAB2hydNlWI2RnF85XXcV0mhKXr2ecQ=
 google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
 google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
 google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
@@ -696,20 +804,21 @@ google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzi
 google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
 google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
 google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
-google.golang.org/protobuf v1.36.5 h1:tPhr+woSbjfYvY6/GPufUoYizxw1cF/yFoxJ2fmpwlM=
-google.golang.org/protobuf v1.36.5/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
+google.golang.org/protobuf v1.36.10 h1:AYd7cD/uASjIL6Q9LiTjz8JLcrh/88q5UObnmY3aOOE=
+google.golang.org/protobuf v1.36.10/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco=
 gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
 gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
 gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
 gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
 gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
+gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
 gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4=
 gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M=
 gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
+gopkg.in/inconshreveable/log15.v2 v2.0.0-20180818164646-67afb5ed74ec/go.mod h1:aPpfJ7XW+gOuirDoZ8gHhLh3kZ1B08FtV2bbmy7Jv3s=
 gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
 gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
-gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc=
-gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc=
 gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
 gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
 gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
@@ -726,45 +835,50 @@ gotest.tools/v3 v3.4.0 h1:ZazjZUfuVeZGLAmlKKuyv3IKP5orXcwtOwDQH6YVr6o=
 gotest.tools/v3 v3.4.0/go.mod h1:CtbdzLSsqVhDgMtKsx03ird5YTGB3ar27v0u/yKBW5g=
 honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
 honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-k8s.io/api v0.32.3 h1:Hw7KqxRusq+6QSplE3NYG4MBxZw1BZnq4aP4cJVINls=
-k8s.io/api v0.32.3/go.mod h1:2wEDTXADtm/HA7CCMD8D8bK4yuBUptzaRhYcYEEYA3k=
-k8s.io/apiextensions-apiserver v0.32.3 h1:4D8vy+9GWerlErCwVIbcQjsWunF9SUGNu7O7hiQTyPY=
-k8s.io/apiextensions-apiserver v0.32.3/go.mod h1:8YwcvVRMVzw0r1Stc7XfGAzB/SIVLunqApySV5V7Dss=
-k8s.io/apimachinery v0.32.3 h1:JmDuDarhDmA/Li7j3aPrwhpNBA94Nvk5zLeOge9HH1U=
-k8s.io/apimachinery v0.32.3/go.mod h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE=
-k8s.io/apiserver v0.32.3 h1:kOw2KBuHOA+wetX1MkmrxgBr648ksz653j26ESuWNY8=
-k8s.io/apiserver v0.32.3/go.mod h1:q1x9B8E/WzShF49wh3ADOh6muSfpmFL0I2t+TG0Zdgc=
-k8s.io/cli-runtime v0.32.3 h1:khLF2ivU2T6Q77H97atx3REY9tXiA3OLOjWJxUrdvss=
-k8s.io/cli-runtime v0.32.3/go.mod h1:vZT6dZq7mZAca53rwUfdFSZjdtLyfF61mkf/8q+Xjak=
-k8s.io/client-go v0.32.3 h1:RKPVltzopkSgHS7aS98QdscAgtgah/+zmpAogooIqVU=
-k8s.io/client-go v0.32.3/go.mod h1:3v0+3k4IcT9bXTc4V2rt+d2ZPPG700Xy6Oi0Gdl2PaY=
-k8s.io/cluster-bootstrap v0.32.3 h1:AqIpsUhB6MUeaAsl1WvaUw54AHRd2hfZrESlKChtd8s=
-k8s.io/cluster-bootstrap v0.32.3/go.mod h1:CHbBwgOb6liDV6JFUTkx5t85T2xidy0sChBDoyYw344=
-k8s.io/component-base v0.32.3 h1:98WJvvMs3QZ2LYHBzvltFSeJjEx7t5+8s71P7M74u8k=
-k8s.io/component-base v0.32.3/go.mod h1:LWi9cR+yPAv7cu2X9rZanTiFKB2kHA+JjmhkKjCZRpI=
+honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
+k8s.io/api v0.33.4 h1:oTzrFVNPXBjMu0IlpA2eDDIU49jsuEorGHB4cvKupkk=
+k8s.io/api v0.33.4/go.mod h1:VHQZ4cuxQ9sCUMESJV5+Fe8bGnqAARZ08tSTdHWfeAc=
+k8s.io/apiextensions-apiserver v0.33.4 h1:rtq5SeXiDbXmSwxsF0MLe2Mtv3SwprA6wp+5qh/CrOU=
+k8s.io/apiextensions-apiserver v0.33.4/go.mod h1:mWXcZQkQV1GQyxeIjYApuqsn/081hhXPZwZ2URuJeSs=
+k8s.io/apimachinery v0.33.4 h1:SOf/JW33TP0eppJMkIgQ+L6atlDiP/090oaX0y9pd9s=
+k8s.io/apimachinery v0.33.4/go.mod h1:BHW0YOu7n22fFv/JkYOEfkUYNRN0fj0BlvMFWA7b+SM=
+k8s.io/apiserver v0.33.4 h1:6N0TEVA6kASUS3owYDIFJjUH6lgN8ogQmzZvaFFj1/Y=
+k8s.io/apiserver v0.33.4/go.mod h1:8ODgXMnOoSPLMUg1aAzMFx+7wTJM+URil+INjbTZCok=
+k8s.io/cli-runtime v0.33.4 h1:V8NSxGfh24XzZVhXmIGzsApdBpGq0RQS2u/Fz1GvJwk=
+k8s.io/cli-runtime v0.33.4/go.mod h1:V+ilyokfqjT5OI+XE+O515K7jihtr0/uncwoyVqXaIU=
+k8s.io/client-go v0.33.4 h1:TNH+CSu8EmXfitntjUPwaKVPN0AYMbc9F1bBS8/ABpw=
+k8s.io/client-go v0.33.4/go.mod h1:LsA0+hBG2DPwovjd931L/AoaezMPX9CmBgyVyBZmbCY=
+k8s.io/cluster-bootstrap v0.33.3 h1:u2NTxJ5CFSBFXaDxLQoOWMly8eni31psVso+caq6uwI=
+k8s.io/cluster-bootstrap v0.33.3/go.mod h1:p970f8u8jf273zyQ5raD8WUu2XyAl0SAWOY82o7i/ds=
+k8s.io/component-base v0.33.4 h1:Jvb/aw/tl3pfgnJ0E0qPuYLT0NwdYs1VXXYQmSuxJGY=
+k8s.io/component-base v0.33.4/go.mod h1:567TeSdixWW2Xb1yYUQ7qk5Docp2kNznKL87eygY8Rc=
 k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
 k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
-k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f h1:GA7//TjRY9yWGy1poLzYYJJ4JRdzg3+O6e8I+e+8T5Y=
-k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f/go.mod h1:R/HEjbvWI0qdfb8viZUeVZm0X6IZnxAydC7YU42CMw4=
-k8s.io/kubectl v0.32.3 h1:VMi584rbboso+yjfv0d8uBHwwxbC438LKq+dXd5tOAI=
-k8s.io/kubectl v0.32.3/go.mod h1:6Euv2aso5GKzo/UVMacV6C7miuyevpfI91SvBvV9Zdg=
-k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 h1:M3sRQVHv7vB20Xc2ybTt7ODCeFj6JSWYFzOFnYeS6Ro=
-k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
-sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.0 h1:CPT0ExVicCzcpeN4baWEV2ko2Z/AsiZgEdwgcfwLgMo=
-sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.0/go.mod h1:Ve9uj1L+deCXFrPOk1LpFXqTg7LCFzFso6PA48q/XZw=
+k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff h1:/usPimJzUKKu+m+TE36gUyGcf03XZEP0ZIKgKj35LS4=
+k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff/go.mod h1:5jIi+8yX4RIb8wk3XwBo5Pq2ccx4FP10ohkbSKCZoK8=
+k8s.io/kubectl v0.33.4 h1:nXEI6Vi+oB9hXxoAHyHisXolm/l1qutK3oZQMak4N98=
+k8s.io/kubectl v0.33.4/go.mod h1:Xe7P9X4DfILvKmlBsVqUtzktkI56lEj22SJW7cFy6nE=
+k8s.io/utils v0.0.0-20250820121507-0af2bda4dd1d h1:wAhiDyZ4Tdtt7e46e9M5ZSAJ/MnPGPs+Ki1gHw4w1R0=
+k8s.io/utils v0.0.0-20250820121507-0af2bda4dd1d/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
+sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.2 h1:jpcvIRr3GLoUoEKRkHKSmGjxb6lWwrBlJsXc+eUYQHM=
+sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.2/go.mod h1:Ve9uj1L+deCXFrPOk1LpFXqTg7LCFzFso6PA48q/XZw=
 sigs.k8s.io/aws-iam-authenticator v0.6.13 h1:QSQcAkpt/hF97Ogyoz6sj3WD2twTd2cmxFb4e6Rs9gA=
 sigs.k8s.io/aws-iam-authenticator v0.6.13/go.mod h1:CnvFyzR/xeLHmUY/BD0qW6q0wp6KIwXmFp4eTfrHdP8=
-sigs.k8s.io/cluster-api v1.10.2 h1:xfvtNu4Fy/41grL0ryH5xSKQjpJEWdO8HiV2lPCCozQ=
-sigs.k8s.io/cluster-api v1.10.2/go.mod h1:/b9Un5Imprib6S7ZOcJitC2ep/5wN72b0pXpMQFfbTw=
-sigs.k8s.io/cluster-api/test v1.10.2 h1:y6vSdS9FSAi/DNoFE2fZo2fed0m1cgW+ueBazk1g4i8=
-sigs.k8s.io/cluster-api/test v1.10.2/go.mod h1:KLeRjNtQS8k5jIPvQF0QxOti/ATu5euwSusb6iFBga8=
-sigs.k8s.io/controller-runtime v0.20.4 h1:X3c+Odnxz+iPTRobG4tp092+CvBU9UK0t/bRf+n0DGU=
-sigs.k8s.io/controller-runtime v0.20.4/go.mod h1:xg2XB0K5ShQzAgsoujxuKN4LNXR2LfwwHsPj7Iaw+XY=
+sigs.k8s.io/cluster-api v1.11.1 h1:7CyGCTxv1p3Y2kRe1ljTj/w4TcdIdWNj0CTBc4i1aBo=
+sigs.k8s.io/cluster-api v1.11.1/go.mod h1:zyrjgJ5RbXhwKcAdUlGPNK5YOHpcmxXvur+5I8lkMUQ=
+sigs.k8s.io/cluster-api/test v1.11.4-0.20251125201037-d322ff6baa2f h1:Ap2kojkmgM8myhii0UkgNY6luS3GliXkGF55zAVK3ls=
+sigs.k8s.io/cluster-api/test v1.11.4-0.20251125201037-d322ff6baa2f/go.mod h1:7Zfdj42bJUrgZC5cuE6Q3zer18XoZLfH+8Sv3Yf7kO0=
+sigs.k8s.io/controller-runtime v0.21.0 h1:CYfjpEuicjUecRk+KAeyYh+ouUBn4llGyDYytIGcJS8=
+sigs.k8s.io/controller-runtime v0.21.0/go.mod h1:OSg14+F65eWqIu4DceX7k/+QRAbTTvxeQSNSOQpukWM=
 sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1Ray9eB4DBr+K+/sCJ8=
 sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3/go.mod h1:18nIHnGi6636UCz6m8i4DhaJ65T6EruyzmoQqI2BVDo=
-sigs.k8s.io/kind v0.27.0 h1:PQ3f0iAWNIj66LYkZ1ivhEg/+Zb6UPMbO+qVei/INZA=
-sigs.k8s.io/kind v0.27.0/go.mod h1:RZVFmy6qcwlSWwp6xeIUv7kXCPF3i8MXsEXxW/J+gJY=
-sigs.k8s.io/structured-merge-diff/v4 v4.4.2 h1:MdmvkGuXi/8io6ixD5wud3vOLwc1rj0aNqRlpuvjmwA=
-sigs.k8s.io/structured-merge-diff/v4 v4.4.2/go.mod h1:N8f93tFZh9U6vpxwRArLiikrE5/2tiu1w1AGfACIGE4=
-sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=
+sigs.k8s.io/kind v0.27.0 h1:PQ3f0iAWNIj66LYkZ1ivhEg/+Zb6UPMbO+qVei/INZA=
+-sigs.k8s.io/kind v0.27.0/go.mod h1:RZVFmy6qcwlSWwp6xeIUv7kXCPF3i8MXsEXxW/J+gJY=
+-sigs.k8s.io/structured-merge-diff/v4 v4.4.2 h1:MdmvkGuXi/8io6ixD5wud3vOLwc1rj0aNqRlpuvjmwA=
+-sigs.k8s.io/structured-merge-diff/v4 v4.4.2/go.mod h1:N8f93tFZh9U6vpxwRArLiikrE5/2tiu1w1AGfACIGE4=
+-sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=
+sigs.k8s.io/kind v0.30.0 h1:2Xi1KFEfSMm0XDcvKnUt15ZfgRPCT0OnCBbpgh8DztY=
+sigs.k8s.io/kind v0.30.0/go.mod h1:FSqriGaoTPruiXWfRnUXNykF8r2t+fHtK0P0m1AbGF8=
+sigs.k8s.io/randfill v0.0.0-20250304075658-069ef1bbf016/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY=
+sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU=
+sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY=
+sigs.k8s.io/structured-merge-diff/v4 v4.6.0 h1:IUA9nvMmnKWcj5jl84xn+T5MnlZKThmUW1TdblaLVAc=
+sigs.k8s.io/structured-merge-diff/v4 v4.6.0/go.mod h1:dDy58f92j70zLsuZVuUX5Wp9vtxXpaZnkPGWeqDfCps=
+sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY=
+sigs.k8s.io/yaml v1.6.0 h1:G8fkbMSAFqgEFgh4b1wmtzDnioxFCUgTZhlbj5P9QYs=
+sigs.k8s.io/yaml v1.6.0/go.mod h1:796bPqUfzR/0jLAl6XjHl3Ck7MiyVv8dbTdyT3/pMf4=
diff --git a/hack/boilerplate/boilerplate.py b/hack/boilerplate/boilerplate.py
index 949360f0d4..fadf453cb0 100755
--- a/hack/boilerplate/boilerplate.py
+++ b/hack/boilerplate/boilerplate.py
@@ -149,7 +149,7 @@ def file_passes(filename, refs, regexs):
 def file_extension(filename):
     return os.path.splitext(filename)[1].split(".")[-1].lower()
 
-skipped_dirs = ['Godeps', 'third_party', '_gopath', '_output', '.git', 'cluster/env.sh',
+skipped_dirs = ['Godeps', '_gopath', '_output', '.git', 'cluster/env.sh',
                 "vendor", "test/e2e/generated/bindata.go", "hack/boilerplate/test",
                 "pkg/kubectl/generated/bindata.go"]
diff --git a/hack/tools/.custom-gcl.yaml b/hack/tools/.custom-gcl.yaml
index 5253dd8852..9fbc2e1cac 100644
--- a/hack/tools/.custom-gcl.yaml
+++ b/hack/tools/.custom-gcl.yaml
@@ -1,4 +1,4 @@
-version: v2.1.0
+version: v2.7.0
 name: golangci-lint-kube-api-linter
 destination: ./bin
 plugins:
diff --git a/hack/tools/Makefile b/hack/tools/Makefile
index b9baed4e92..c13c11d1e0 100644
--- a/hack/tools/Makefile
+++ b/hack/tools/Makefile
@@ -76,8 +76,8 @@ $(CONTROLLER_GEN): $(BIN_DIR) go.mod go.sum # Build controller-gen from tools fo
 	go build -tags=tools -o $@ sigs.k8s.io/controller-tools/cmd/controller-gen
 
 CONVERSION_GEN := $(BIN_DIR)/conversion-gen
-$(CONVERSION_GEN): $(BIN_DIR) go.mod go.sum $(call rwildcard,.,third_party/conversion-gen/*.*)
-	go build -tags=tools -o $@ ./third_party/conversion-gen
+$(CONVERSION_GEN): $(BIN_DIR) go.mod go.sum
+	go build -tags=tools -o $@ k8s.io/code-generator/cmd/conversion-gen
 
 CONVERSION_VERIFIER := $(BIN_DIR)/conversion-verifier
 $(CONVERSION_VERIFIER): $(BIN_DIR) go.mod go.sum
diff --git a/hack/tools/go.mod b/hack/tools/go.mod
index 8dbe64f61b..71992c4ba9 100644
--- a/hack/tools/go.mod
+++ b/hack/tools/go.mod
@@ -9,17 +9,14 @@ require (
 	github.com/goreleaser/goreleaser v1.26.2
 	github.com/itchyny/gojq v0.12.17
 	github.com/joelanford/go-apidiff v0.8.3
-	github.com/mikefarah/yq/v4 v4.47.2
-	github.com/spf13/pflag v1.0.10
+	github.com/mikefarah/yq/v4 v4.48.1
 	k8s.io/apimachinery v0.34.1
 	k8s.io/code-generator v0.34.0
-	k8s.io/gengo/v2 v2.0.0-20250604051438-85fd79dbfd9f
-	k8s.io/klog/v2 v2.130.1
 	sigs.k8s.io/cluster-api/hack/tools v0.0.0-20250520093716-525566440a77
 	sigs.k8s.io/controller-runtime/tools/setup-envtest v0.0.0-20240923090159-236e448db12c
 	sigs.k8s.io/controller-tools v0.19.0
 	sigs.k8s.io/kind v0.30.0
-	sigs.k8s.io/kustomize/kustomize/v5 v5.7.1
+	sigs.k8s.io/kustomize/kustomize/v5 v5.8.0
 	sigs.k8s.io/testing_frameworks v0.1.2
 )
@@ -281,7 +278,7 @@ require (
 	github.com/sagikazarmark/locafero v0.7.0 // indirect
 	github.com/scylladb/go-set v1.0.3-0.20200225121959-cc7b2070d91e // indirect
 	github.com/secure-systems-lab/go-securesystemslib v0.8.0 // indirect
-	github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 // indirect
+	github.com/sergi/go-diff v1.4.0 // indirect
 	github.com/shopspring/decimal v1.4.0 // indirect
 	github.com/sigstore/cosign/v2 v2.2.4 // indirect
 	github.com/sigstore/rekor v1.3.6 // indirect
@@ -294,6 +291,7 @@ require (
 	github.com/spf13/afero v1.12.0 // indirect
 	github.com/spf13/cast v1.7.1 // indirect
 	github.com/spf13/cobra v1.10.1 // indirect
+	github.com/spf13/pflag v1.0.10 // indirect
 	github.com/spf13/viper v1.20.1 // indirect
 	github.com/spiffe/go-spiffe/v2 v2.5.0 // indirect
 	github.com/subosito/gotenv v1.6.0 // indirect
@@ -331,17 +329,17 @@ require (
 	go.yaml.in/yaml/v2 v2.4.2 // indirect
 	go.yaml.in/yaml/v3 v3.0.4 // indirect
 	gocloud.dev v0.37.0 // indirect
-	golang.org/x/crypto v0.41.0 // indirect
+	golang.org/x/crypto v0.45.0 // indirect
 	golang.org/x/exp v0.0.0-20250506013437-ce4c2cf36ca6 // indirect
-	golang.org/x/mod v0.27.0 // indirect
-	golang.org/x/net v0.43.0 // indirect
+	golang.org/x/mod v0.29.0 // indirect
+	golang.org/x/net v0.47.0 // indirect
 	golang.org/x/oauth2 v0.28.0 // indirect
-	golang.org/x/sync v0.16.0 // indirect
-	golang.org/x/sys v0.35.0 // indirect
-	golang.org/x/term v0.34.0 // indirect
-	golang.org/x/text v0.28.0 // indirect
+	golang.org/x/sync v0.18.0 // indirect
+	golang.org/x/sys v0.38.0 // indirect
+	golang.org/x/term v0.37.0 // indirect
+	golang.org/x/text v0.31.0 // indirect
 	golang.org/x/time v0.11.0 // indirect
-	golang.org/x/tools v0.36.0 // indirect
+	golang.org/x/tools v0.38.0 // indirect
 	golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 // indirect
 	google.golang.org/api v0.227.0 // indirect
 	google.golang.org/genproto v0.0.0-20250303144028-a0af3efb3deb // indirect
@@ -362,16 +360,18 @@ require (
 	k8s.io/api v0.34.0 // indirect
 	k8s.io/apiextensions-apiserver v0.34.0 // indirect
 	k8s.io/gengo v0.0.0-20230829151522-9cce18d56c01 // indirect
+	k8s.io/gengo/v2 v2.0.0-20250604051438-85fd79dbfd9f // indirect
 	k8s.io/klog v0.2.0 // indirect
+	k8s.io/klog/v2 v2.130.1 // indirect
 	k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b // indirect
 	k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 // indirect
 	lukechampine.com/blake3 v1.2.1 // indirect
 	sigs.k8s.io/cluster-api v1.10.2 // indirect
 	sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 // indirect
 	sigs.k8s.io/kubebuilder/docs/book/utils v0.0.0-20211028165026-57688c578b5d // indirect
-	sigs.k8s.io/kustomize/api v0.20.1 // indirect
-	sigs.k8s.io/kustomize/cmd/config v0.20.1 // indirect
-	sigs.k8s.io/kustomize/kyaml v0.20.1 // indirect
+	sigs.k8s.io/kustomize/api v0.21.0 // indirect
+	sigs.k8s.io/kustomize/cmd/config v0.21.0 // indirect
+	sigs.k8s.io/kustomize/kyaml v0.21.0 // indirect
 	sigs.k8s.io/randfill v1.0.0 // indirect
 	sigs.k8s.io/structured-merge-diff/v6 v6.3.0 // indirect
 	sigs.k8s.io/yaml v1.6.0 // indirect
diff --git a/hack/tools/go.sum b/hack/tools/go.sum
index 30c19a1665..05305f6fbc 100644
--- a/hack/tools/go.sum
+++ b/hack/tools/go.sum
@@ -116,8 +116,8 @@ github.com/alecthomas/assert/v2 v2.11.0 h1:2Q9r3ki8+JYXvGsDyBXwH3LcJ+WK5D0gc5E8v
 github.com/alecthomas/assert/v2 v2.11.0/go.mod h1:Bze95FyfUr7x34QZrjL+XP+0qgp/zg8yS+TtBj1WA3k=
 github.com/alecthomas/participle/v2 v2.1.4 h1:W/H79S8Sat/krZ3el6sQMvMaahJ+XcM9WSI2naI7w2U=
 github.com/alecthomas/participle/v2 v2.1.4/go.mod h1:8tqVbpTX20Ru4NfYQgZf4mP18eXPTBViyMWiArNEgGI=
-github.com/alecthomas/repr v0.5.1 h1:E3G4t2QbHTSNpPKBgMTln5KLkZHLOcU7r37J4pXBuIg=
-github.com/alecthomas/repr v0.5.1/go.mod h1:Fr0507jx4eOXV7AlPV6AVZLYrLIuIeSOWtW57eE/O/4=
+github.com/alecthomas/repr v0.5.2 h1:SU73FTI9D1P5UNtvseffFSGmdNci/O6RsqzeXJtP0Qs=
+github.com/alecthomas/repr v0.5.2/go.mod h1:Fr0507jx4eOXV7AlPV6AVZLYrLIuIeSOWtW57eE/O/4=
 github.com/anchore/bubbly v0.0.0-20230518153401-87b6af8ccf22 h1:5NFK6VGgqBUOAX2SYyzFYvNdOiYDxzim8jga386FlZY=
 github.com/anchore/bubbly v0.0.0-20230518153401-87b6af8ccf22/go.mod h1:Kv+Mm9CdtnV8iem48iEPIwy7/N4Wmk0hpxYNH5gTwKQ=
 github.com/anchore/go-logger v0.0.0-20230725134548-c21dafa1ec5a h1:nJ2G8zWKASyVClGVgG7sfM5mwoZlZ2zYpIzN2OhjWkw=
@@ -621,8 +621,8 @@ github.com/mattn/go-runewidth v0.0.15 h1:UNAjwbU9l54TA3KzvqLGxwWjHmMgBUVhBiTjelZ
 github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
 github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d h1:5PJl274Y63IEHC+7izoQE9x6ikvDFZS2mDVS3drnohI=
 github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE=
-github.com/mikefarah/yq/v4 v4.47.2 h1:Jb5fHlvgK5eeaPbreG9UJs1E5w6l5hUzXjeaY6LTTWY=
-github.com/mikefarah/yq/v4 v4.47.2/go.mod h1:ulYbZUzGJsBDDwO5ohvk/KOW4vW5Iddd/DBeAY1Q09g=
+github.com/mikefarah/yq/v4 v4.48.1 h1:xfw9oDxur6z+friOtMAuzSvQ9LPLb0LRDsl1e36DGCo=
+github.com/mikefarah/yq/v4 v4.48.1/go.mod h1:NbCTmYAY+KjwCZemVDqLxSAeZKjXFK04lgIiSx13tD0=
 github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM=
 github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8=
 github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw=
@@ -754,8 +754,8 @@ github.com/scylladb/go-set v1.0.3-0.20200225121959-cc7b2070d91e/go.mod h1:DkpGd7
 github.com/secure-systems-lab/go-securesystemslib v0.8.0 h1:mr5An6X45Kb2nddcFlbmfHkLguCE9laoZCUzEEpIZXA=
 github.com/secure-systems-lab/go-securesystemslib v0.8.0/go.mod h1:UH2VZVuJfCYR8WgMlCU1uFsOUU+KeyrTWcSS73NBOzU=
 github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
-github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 h1:n661drycOFuPLCN3Uc8sB6B/s6Z4t2xvBgU1htSHuq8=
-github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3/go.mod h1:A0bzQcvG0E7Rwjx0REVgAGH58e96+X0MeOfepqsbeW4=
+github.com/sergi/go-diff v1.4.0 h1:n/SP9D5ad1fORl+llWyN+D6qoUETXNZARKjyY2/KVCw=
+github.com/sergi/go-diff v1.4.0/go.mod h1:A0bzQcvG0E7Rwjx0REVgAGH58e96+X0MeOfepqsbeW4=
 github.com/shopspring/decimal v1.4.0 h1:bxl37RwXBklmTi0C79JfXCEBD1cqqHt0bbgBAGFp81k=
 github.com/shopspring/decimal v1.4.0/go.mod h1:gawqmDU56v4yIKSwfBSFip1HdCCXN8/+DMd9qYNcwME=
 github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
@@ -930,8 +930,8 @@ golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0
 golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58=
 golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc=
 golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg=
-golang.org/x/crypto v0.41.0 h1:WKYxWedPGCTVVl5+WHSSrOBT0O8lx32+zxmHxijgXp4=
-golang.org/x/crypto v0.41.0/go.mod h1:pO5AFd7FA68rFak7rOAGVuygIISepHftHnr8dr6+sUc=
+golang.org/x/crypto v0.45.0 h1:jMBrvKuj23MTlT0bQEOBcAE0mjg8mK9RXFhRH6nyF3Q=
+golang.org/x/crypto v0.45.0/go.mod h1:XTGrrkGJve7CYK7J8PEww4aY7gM3qMCElcJQ8n8JdX4=
 golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
 golang.org/x/exp v0.0.0-20250506013437-ce4c2cf36ca6 h1:y5zboxd6LQAqYIhHnB48p0ByQ/GnQx2BE33L8BOHQkI=
 golang.org/x/exp v0.0.0-20250506013437-ce4c2cf36ca6/go.mod h1:U6Lno4MTRCDY+Ba7aCcauB9T60gsv5s4ralQzP72ZoQ=
@@ -947,8 +947,8 @@ golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91
 golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
 golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
 golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
-golang.org/x/mod v0.27.0 h1:kb+q2PyFnEADO2IEF935ehFUXlWiNjJWtRNgBLSfbxQ=
-golang.org/x/mod v0.27.0/go.mod h1:rWI627Fq0DEoudcK+MBkNkCe0EetEaDSwJJkCcjpazc=
+golang.org/x/mod v0.29.0 h1:HV8lRxZC4l2cr3Zq1LvtOsi/ThTgWnUk/y64QSs8GwA=
+golang.org/x/mod v0.29.0/go.mod h1:NyhrlYXJ2H4eJiRy/WDBO6HMqZQ6q9nk4JzS3NuCK+w=
 golang.org/x/net v0.0.0-20180112015858-5ccada7d0a7b/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -967,8 +967,8 @@ golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
 golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
 golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk=
 golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY=
-golang.org/x/net v0.43.0 h1:lat02VYK2j4aLzMzecihNvTlJNQUq316m2Mr9rnM6YE=
-golang.org/x/net v0.43.0/go.mod h1:vhO1fvI4dGsIjh73sWfUVjj3N7CA9WkKJNQm2svM6Jg=
+golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY=
+golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU=
 golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
 golang.org/x/oauth2 v0.28.0 h1:CrgCKl8PPAVtLnU3c+EDw6x11699EWlsDeWNWKdIOkc=
 golang.org/x/oauth2 v0.28.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8=
@@ -982,8 +982,8 @@ golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJ
 golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
 golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
-golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw=
-golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
+golang.org/x/sync v0.18.0 h1:kr88TuHDroi+UVf+0hZnirlk8o8T+4MrK6mr60WkH/I=
+golang.org/x/sync v0.18.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
 golang.org/x/sys v0.0.0-20180117170059-2c42eef0765b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -1008,16 +1008,16 @@ golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
-golang.org/x/sys v0.35.0 h1:vz1N37gP5bs89s7He8XuIYXpyY0+QlsKmzipCbUtyxI=
-golang.org/x/sys v0.35.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
+golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc=
+golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
 golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
 golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
 golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
 golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
 golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU=
 golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY=
-golang.org/x/term v0.34.0 h1:O/2T7POpk0ZZ7MAzMeWFSg6S5IpWd/RXDlM9hgM3DR4=
-golang.org/x/term v0.34.0/go.mod h1:5jC53AEywhIVebHgPVeg0mj8OD3VO9OzclacVrqpaAw=
+golang.org/x/term v0.37.0 h1:8EGAD0qCmHYZg6J17DvsMy9/wJ7/D/4pV/wfnld5lTU=
+golang.org/x/term v0.37.0/go.mod h1:5pB4lxRNYYVZuTLmy8oR2BH8dflOR+IbTYFD8fi3254=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.1-0.20171227012246-e19ae1496984/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
@@ -1027,8 +1027,8 @@ golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
 golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
 golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
 golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
-golang.org/x/text v0.28.0 h1:rhazDwis8INMIwQ4tpjLDzUhx6RlXqZNPEM0huQojng=
-golang.org/x/text v0.28.0/go.mod h1:U8nCwOR8jO/marOQ0QbDiOngZVEBB7MAiitBuMjXiNU=
+golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM=
+golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM=
 golang.org/x/time v0.11.0 h1:/bpjEDfN9tkoN/ryeYHnv5hcMlc8ncjMcM4XBk5NWV0=
 golang.org/x/time v0.11.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg=
 golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
@@ -1050,8 +1050,8 @@ golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc
 golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
 golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58=
 golang.org/x/tools v0.17.0/go.mod h1:xsh6VxdV005rRVaS6SSAf9oiAqljS7UZUacMZ8Bnsps=
-golang.org/x/tools v0.36.0 h1:kWS0uv/zsvHEle1LbV5LE8QujrxB3wfQyxHfhOk0Qkg=
-golang.org/x/tools v0.36.0/go.mod h1:WBDiHKJK8YgLHlcQPYQzNCkUxUypCaa5ZegCVutKm+s=
+golang.org/x/tools v0.38.0 h1:Hx2Xv8hISq8Lm16jvBZ2VQf+RLmbd7wVUsALibYI/IQ=
+golang.org/x/tools v0.38.0/go.mod h1:yEsQ/d/YK8cjh0L6rZlY8tgtlKiBNTL14pGDJPJpYQs=
 golang.org/x/tools/go/expect v0.1.0-deprecated h1:jY2C5HGYR5lqex3gEniOQL0r7Dq5+VGVgY1nudX5lXY=
 golang.org/x/tools/go/expect v0.1.0-deprecated/go.mod h1:eihoPOH+FgIqa3FpoTwguz/bVUSGBlGQU67vpBeOrBY=
 golang.org/x/tools/go/packages/packagestest v0.1.1-deprecated h1:1h2MnaIAIXISqTFKdENegdpAgUXz6NrPEsbIeWaBRvM=
@@ -1176,14 +1176,14 @@ sigs.k8s.io/kind v0.30.0 h1:2Xi1KFEfSMm0XDcvKnUt15ZfgRPCT0OnCBbpgh8DztY=
 sigs.k8s.io/kind v0.30.0/go.mod h1:FSqriGaoTPruiXWfRnUXNykF8r2t+fHtK0P0m1AbGF8=
 sigs.k8s.io/kubebuilder/docs/book/utils v0.0.0-20211028165026-57688c578b5d h1:KLiQzLW3RZJR19+j4pw2h5iioyAyqCkDBEAFdnGa3N8=
 sigs.k8s.io/kubebuilder/docs/book/utils v0.0.0-20211028165026-57688c578b5d/go.mod h1:NRdZafr4zSCseLQggdvIMXa7umxf+Q+PJzrj3wFwiGE=
-sigs.k8s.io/kustomize/api v0.20.1 h1:iWP1Ydh3/lmldBnH/S5RXgT98vWYMaTUL1ADcr+Sv7I=
-sigs.k8s.io/kustomize/api v0.20.1/go.mod h1:t6hUFxO+Ph0VxIk1sKp1WS0dOjbPCtLJ4p8aADLwqjM=
-sigs.k8s.io/kustomize/cmd/config v0.20.1 h1:4APUORmZe2BYrsqgGfEKdd/r7gM6i43egLrUzilpiFo=
-sigs.k8s.io/kustomize/cmd/config v0.20.1/go.mod h1:R7rQ8kxknVlXWVUIbxWtMgu8DCCNVtl8V0KrmeVd/KE=
-sigs.k8s.io/kustomize/kustomize/v5 v5.7.1 h1:sYJsarwy/SDJfjjLMUqwFDGPwzUtMOQ1i1Ed49+XSbw=
-sigs.k8s.io/kustomize/kustomize/v5 v5.7.1/go.mod h1:+5/SrBcJ4agx1SJknGuR/c9thwRSKLxnKoI5BzXFaLU=
-sigs.k8s.io/kustomize/kyaml v0.20.1 h1:PCMnA2mrVbRP3NIB6v9kYCAc38uvFLVs8j/CD567A78=
-sigs.k8s.io/kustomize/kyaml v0.20.1/go.mod h1:0EmkQHRUsJxY8Ug9Niig1pUMSCGHxQ5RklbpV/Ri6po=
+sigs.k8s.io/kustomize/api v0.21.0 h1:I7nry5p8iDJbuRdYS7ez8MUvw7XVNPcIP5GkzzuXIIQ=
+sigs.k8s.io/kustomize/api v0.21.0/go.mod h1:XGVQuR5n2pXKWbzXHweZU683pALGw/AMVO4zU4iS8SE=
+sigs.k8s.io/kustomize/cmd/config v0.21.0 h1:ikLtzcNK9isBqSaXXhAg7LRCTNKdp70z5v/c4Y55DOw=
+sigs.k8s.io/kustomize/cmd/config v0.21.0/go.mod h1:oxa6eRzeLWUcE7M3Rmio29Sfc4KpqGspHur3GjOYqNA=
+sigs.k8s.io/kustomize/kustomize/v5 v5.8.0 h1:CCIJK7z/xJOlkXOaDOcL2jprV53a/eloiL02wg7oJJs=
+sigs.k8s.io/kustomize/kustomize/v5 v5.8.0/go.mod h1:qewGAExYZK9LbPPbnJMPK5HQ8nsdxRzpclIg0qslzDo=
+sigs.k8s.io/kustomize/kyaml v0.21.0 h1:7mQAf3dUwf0wBerWJd8rXhVcnkk5Tvn/q91cGkaP6HQ=
+sigs.k8s.io/kustomize/kyaml v0.21.0/go.mod h1:hmxADesM3yUN2vbA5z1/YTBnzLJ1dajdqpQonwBL1FQ=
 sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU=
 sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY=
 sigs.k8s.io/structured-merge-diff/v6 v6.3.0 h1:jTijUJbW353oVOd9oTlifJqOGEkUw2jB/fXCbTiQEco=
diff --git a/hack/tools/third_party/conversion-gen/LICENSE b/hack/tools/third_party/conversion-gen/LICENSE
deleted file mode 100644
index d645695673..0000000000
--- a/hack/tools/third_party/conversion-gen/LICENSE
+++ /dev/null
@@ -1,202 +0,0 @@
-
-                                 Apache License
-                           Version 2.0, January 2004
-                        http://www.apache.org/licenses/
-
-   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
-   1. Definitions.
-
-      "License" shall mean the terms and conditions for use, reproduction,
-      and distribution as defined by Sections 1 through 9 of this document.
-
-      "Licensor" shall mean the copyright owner or entity authorized by
-      the copyright owner that is granting the License.
-
-      "Legal Entity" shall mean the union of the acting entity and all
-      other entities that control, are controlled by, or are under common
-      control with that entity. For the purposes of this definition,
-      "control" means (i) the power, direct or indirect, to cause the
-      direction or management of such entity, whether by contract or
-      otherwise, or (ii) ownership of fifty percent (50%) or more of the
-      outstanding shares, or (iii) beneficial ownership of such entity.
-
-      "You" (or "Your") shall mean an individual or Legal Entity
-      exercising permissions granted by this License.
-
-      "Source" form shall mean the preferred form for making modifications,
-      including but not limited to software source code, documentation
-      source, and configuration files.
-
-      "Object" form shall mean any form resulting from mechanical
-      transformation or translation of a Source form, including but
-      not limited to compiled object code, generated documentation,
-      and conversions to other media types.
-
-      "Work" shall mean the work of authorship, whether in Source or
-      Object form, made available under the License, as indicated by a
-      copyright notice that is included in or attached to the work
-      (an example is provided in the Appendix below).
-
-      "Derivative Works" shall mean any work, whether in Source or Object
-      form, that is based on (or derived from) the Work and for which the
-      editorial revisions, annotations, elaborations, or other modifications
-      represent, as a whole, an original work of authorship. For the purposes
-      of this License, Derivative Works shall not include works that remain
-      separable from, or merely link (or bind by name) to the interfaces of,
-      the Work and Derivative Works thereof.
-
-      "Contribution" shall mean any work of authorship, including
-      the original version of the Work and any modifications or additions
-      to that Work or Derivative Works thereof, that is intentionally
-      submitted to Licensor for inclusion in the Work by the copyright owner
-      or by an individual or Legal Entity authorized to submit on behalf of
-      the copyright owner. For the purposes of this definition, "submitted"
-      means any form of electronic, verbal, or written communication sent
-      to the Licensor or its representatives, including but not limited to
-      communication on electronic mailing lists, source code control systems,
-      and issue tracking systems that are managed by, or on behalf of, the
-      Licensor for the purpose of discussing and improving the Work, but
-      excluding communication that is conspicuously marked or otherwise
-      designated in writing by the copyright owner as "Not a Contribution."
-
-      "Contributor" shall mean Licensor and any individual or Legal Entity
-      on behalf of whom a Contribution has been received by Licensor and
-      subsequently incorporated within the Work.
-
-   2. Grant of Copyright License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      copyright license to reproduce, prepare Derivative Works of,
-      publicly display, publicly perform, sublicense, and distribute the
-      Work and such Derivative Works in Source or Object form.
-
-   3. Grant of Patent License. Subject to the terms and conditions of
-      this License, each Contributor hereby grants to You a perpetual,
-      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-      (except as stated in this section) patent license to make, have made,
-      use, offer to sell, sell, import, and otherwise transfer the Work,
-      where such license applies only to those patent claims licensable
-      by such Contributor that are necessarily infringed by their
-      Contribution(s) alone or by combination of their Contribution(s)
-      with the Work to which such Contribution(s) was submitted. If You
-      institute patent litigation against any entity (including a
-      cross-claim or counterclaim in a lawsuit) alleging that the Work
-      or a Contribution incorporated within the Work constitutes direct
-      or contributory patent infringement, then any patent licenses
-      granted to You under this License for that Work shall terminate
-      as of the date such litigation is filed.
-
-   4. Redistribution. You may reproduce and distribute copies of the
-      Work or Derivative Works thereof in any medium, with or without
-      modifications, and in Source or Object form, provided that You
-      meet the following conditions:
-
-      (a) You must give any other recipients of the Work or
-          Derivative Works a copy of this License; and
-
-      (b) You must cause any modified files to carry prominent notices
-          stating that You changed the files; and
-
-      (c) You must retain, in the Source form of any Derivative Works
-          that You distribute, all copyright, patent, trademark, and
-          attribution notices from the Source form of the Work,
-          excluding those notices that do not pertain to any part of
-          the Derivative Works; and
-
-      (d) If the Work includes a "NOTICE" text file as part of its
-          distribution, then any Derivative Works that You distribute must
-          include a readable copy of the attribution notices contained
-          within such NOTICE file, excluding those notices that do not
-          pertain to any part of the Derivative Works, in at least one
-          of the following places: within a NOTICE text file distributed
-          as part of the Derivative Works; within the Source form or
-          documentation, if provided along with the Derivative Works; or,
-          within a display generated by the Derivative Works, if and
-          wherever such third-party notices normally appear. The contents
-          of the NOTICE file are for informational purposes only and
-          do not modify the License. You may add Your own attribution
-          notices within Derivative Works that You distribute, alongside
-          or as an addendum to the NOTICE text from the Work, provided
-          that such additional attribution notices cannot be construed
-          as modifying the License.
-
-      You may add Your own copyright statement to Your modifications and
-      may provide additional or different license terms and conditions
-      for use, reproduction, or distribution of Your modifications, or
-      for any such Derivative Works as a whole, provided Your use,
-      reproduction, and distribution of the Work otherwise complies with
-      the conditions stated in this License.
-
-   5. Submission of Contributions. Unless You explicitly state otherwise,
-      any Contribution intentionally submitted for inclusion in the Work
-      by You to the Licensor shall be under the terms and conditions of
-      this License, without any additional terms or conditions.
-      Notwithstanding the above, nothing herein shall supersede or modify
-      the terms of any separate license agreement you may have executed
-      with Licensor regarding such Contributions.
-
-   6. Trademarks. This License does not grant permission to use the trade
-      names, trademarks, service marks, or product names of the Licensor,
-      except as required for reasonable and customary use in describing the
-      origin of the Work and reproducing the content of the NOTICE file.
-
-   7. Disclaimer of Warranty. Unless required by applicable law or
-      agreed to in writing, Licensor provides the Work (and each
-      Contributor provides its Contributions) on an "AS IS" BASIS,
-      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-      implied, including, without limitation, any warranties or conditions
-      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
-      PARTICULAR PURPOSE. You are solely responsible for determining the
-      appropriateness of using or redistributing the Work and assume any
-      risks associated with Your exercise of permissions under this License.
-
-   8. Limitation of Liability.
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/hack/tools/third_party/conversion-gen/README.md b/hack/tools/third_party/conversion-gen/README.md deleted file mode 100644 index e03c6bf55e..0000000000 --- a/hack/tools/third_party/conversion-gen/README.md +++ /dev/null @@ -1,24 +0,0 @@ -# code-generator - -Golang code-generators used to implement [Kubernetes-style API types](https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md). - -## Purpose - -These code-generators can be used -- in the context of [CustomResourceDefinition](https://kubernetes.io/docs/tasks/access-kubernetes-api/extend-api-custom-resource-definitions/) to build native, versioned clients, - informers and other helpers -- in the context of [User-provider API Servers](https://github.com/kubernetes/apiserver) to build conversions between internal and versioned types, defaulters, protobuf codecs, - internal and versioned clients and informers. 
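To make the deleted README's purpose section concrete, here is a minimal runnable sketch of the shape of code conversion-gen emits for a type that exists in two API versions. The `Widget` types and single-file layout are hypothetical and collapsed for brevity; they are not code from this repository:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/conversion"
)

// Stand-ins for the same logical type in two API versions.
type WidgetV1Beta1 struct{ Name string }
type WidgetV1Beta2 struct{ Name string }

// conversion-gen emits an autoConvert_* function that does the field work...
func autoConvert_v1beta1_Widget_To_v1beta2_Widget(in *WidgetV1Beta1, out *WidgetV1Beta2, _ conversion.Scope) error {
	out.Name = in.Name
	return nil
}

// ...and a public Convert_* wrapper, which is what callers and the
// runtime.Scheme registration use.
func Convert_v1beta1_Widget_To_v1beta2_Widget(in *WidgetV1Beta1, out *WidgetV1Beta2, s conversion.Scope) error {
	return autoConvert_v1beta1_Widget_To_v1beta2_Widget(in, out, s)
}

func main() {
	in, out := WidgetV1Beta1{Name: "w"}, WidgetV1Beta2{}
	_ = Convert_v1beta1_Widget_To_v1beta2_Widget(&in, &out, nil)
	fmt.Println(out.Name) // prints: w
}
```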
-
-## Resources
-- The example [sample controller](https://github.com/kubernetes/sample-controller) shows a code example of a controller that uses the clients, listers and informers generated by this library.
-- The article [Kubernetes Deep Dive: Code Generation for CustomResources](https://blog.openshift.com/kubernetes-deep-dive-code-generation-customresources/) gives step-by-step instructions on how to use this library.
-
-## Compatibility
-
-HEAD of this repo will match HEAD of k8s.io/apiserver, k8s.io/apimachinery, and k8s.io/client-go.
-
-## Where does it come from?
-
-`code-generator` is synced from https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/code-generator.
-Code changes are made in that location, merged into `k8s.io/kubernetes` and later synced here.
diff --git a/hack/tools/third_party/conversion-gen/generators/conversion.go b/hack/tools/third_party/conversion-gen/generators/conversion.go
deleted file mode 100644
index b2c9f695ed..0000000000
--- a/hack/tools/third_party/conversion-gen/generators/conversion.go
+++ /dev/null
@@ -1,1210 +0,0 @@
-/*
-Copyright 2016 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package generators
-
-import (
-	"bytes"
-	"fmt"
-	"io"
-	"path"
-	"reflect"
-	"sort"
-	"strings"
-
-	"k8s.io/code-generator/cmd/conversion-gen/args"
-	"k8s.io/gengo/v2"
-	"k8s.io/gengo/v2/generator"
-	"k8s.io/gengo/v2/namer"
-	"k8s.io/gengo/v2/types"
-	"k8s.io/klog/v2"
-)
-
-// These are the comment tags that carry parameters for conversion generation.
const (
-	// e.g., "+k8s:conversion-gen=<peer-pkg>" in doc.go, where <peer-pkg> is the
-	// import path of the package the peer types are defined in.
-	// e.g., "+k8s:conversion-gen=false" in a type's comment will let
-	// conversion-gen skip that type.
-	tagName = "k8s:conversion-gen"
-	// e.g. "+k8s:conversion-gen:explicit-from=net/url.Values" in the type comment
-	// will result in generating conversion from net/url.Values.
-	explicitFromTagName = "k8s:conversion-gen:explicit-from"
-	// e.g., "+k8s:conversion-gen-external-types=<type-pkg>" in doc.go, where
-	// <type-pkg> is the relative path to the package the types are defined in.
-	externalTypesTagName = "k8s:conversion-gen-external-types"
-)
-
-func extractTag(comments []string) []string {
-	return gengo.ExtractCommentTags("+", comments)[tagName]
-}
-
-func extractExplicitFromTag(comments []string) []string {
-	return gengo.ExtractCommentTags("+", comments)[explicitFromTagName]
-}
-
-func extractExternalTypesTag(comments []string) []string {
-	return gengo.ExtractCommentTags("+", comments)[externalTypesTagName]
-}
-
-func isCopyOnly(comments []string) bool {
-	values := gengo.ExtractCommentTags("+", comments)["k8s:conversion-fn"]
-	return len(values) == 1 && values[0] == "copy-only"
-}
-
-func isDrop(comments []string) bool {
-	values := gengo.ExtractCommentTags("+", comments)["k8s:conversion-fn"]
-	return len(values) == 1 && values[0] == "drop"
-}
-
-// TODO: This is created only to reduce number of changes in a single PR.
-// Remove it and use PublicNamer instead.
-func conversionNamer() *namer.NameStrategy { - return &namer.NameStrategy{ - Join: func(pre string, in []string, post string) string { - return strings.Join(in, "_") - }, - PrependPackageNames: 1, - } -} - -func defaultFnNamer() *namer.NameStrategy { - return &namer.NameStrategy{ - Prefix: "SetDefaults_", - Join: func(pre string, in []string, post string) string { - return pre + strings.Join(in, "_") + post - }, - } -} - -// NameSystems returns the name system used by the generators in this package. -func NameSystems() namer.NameSystems { - return namer.NameSystems{ - "public": conversionNamer(), - "raw": namer.NewRawNamer("", nil), - "defaultfn": defaultFnNamer(), - } -} - -// DefaultNameSystem returns the default name system for ordering the types to be -// processed by the generators in this package. -func DefaultNameSystem() string { - return "public" -} - -func getPeerTypeFor(context *generator.Context, t *types.Type, potenialPeerPkgs []string) *types.Type { - for _, ppp := range potenialPeerPkgs { - p := context.Universe.Package(ppp) - if p == nil { - continue - } - if p.Has(t.Name.Name) { - return p.Type(t.Name.Name) - } - } - return nil -} - -type conversionPair struct { - inType *types.Type - outType *types.Type -} - -// All of the types in conversions map are of type "DeclarationOf" with -// the underlying type being "Func". -type conversionFuncMap map[conversionPair]*types.Type - -// Returns all manually-defined conversion functions in the package. -func getManualConversionFunctions(context *generator.Context, pkg *types.Package, manualMap conversionFuncMap) { - if pkg == nil { - klog.Warning("Skipping nil package passed to getManualConversionFunctions") - return - } - klog.V(3).Infof("Scanning for conversion functions in %v", pkg.Path) - - scopeName := types.Ref(conversionPackagePath, "Scope").Name.String() - errorName := types.Ref("", "error").Name.String() - buffer := &bytes.Buffer{} - sw := generator.NewSnippetWriter(buffer, context, "$", "$") - - for _, f := range pkg.Functions { - if f.Underlying == nil || f.Underlying.Kind != types.Func { - klog.Errorf("Malformed function: %#v", f) - continue - } - if f.Underlying.Signature == nil { - klog.Errorf("Function without signature: %#v", f) - continue - } - klog.V(6).Infof("Considering function %s", f.Name) - signature := f.Underlying.Signature - // Check whether the function is conversion function. - // Note that all of them have signature: - // func Convert_inType_To_outType(inType, outType, conversion.Scope) error - if signature.Receiver != nil { - klog.V(6).Infof("%s has a receiver", f.Name) - continue - } - if len(signature.Parameters) != 3 || signature.Parameters[2].Name != scopeName { - klog.V(6).Infof("%s has wrong parameters", f.Name) - continue - } - if len(signature.Results) != 1 || signature.Results[0].Name != errorName { - klog.V(6).Infof("%s has wrong results", f.Name) - continue - } - inType := signature.Parameters[0] - outType := signature.Parameters[1] - if inType.Type.Kind != types.Pointer || outType.Type.Kind != types.Pointer { - klog.V(6).Infof("%s has wrong parameter types", f.Name) - continue - } - // Now check if the name satisfies the convention. - // TODO: This should call the Namer directly. 
- args := argsFromType(inType.Type.Elem, outType.Type.Elem) - sw.Do("Convert_$.inType|public$_To_$.outType|public$", args) - if f.Name.Name == buffer.String() { - klog.V(2).Infof("Found conversion function %s", f.Name) - key := conversionPair{inType.Type.Elem, outType.Type.Elem} - // We might scan the same package twice, and that's OK. - // @randomvariable: Temporarily disable duplicate static conversion detection. - // @nrb: This is done because both the stable and experimental APIs have types of the same version (v1beta2) & type converting to v1beta1 equivalents. - // This conversion tool does not take the full package name into account when generating the conversion functions. - // if v, ok := manualMap[key]; ok && v != nil && v.Name.Package != pkg.Path { - // panic(fmt.Sprintf("duplicate static conversion defined: %s -> %s from:\n%s.%s\n%s.%s", key.inType, key.outType, v.Name.Package, v.Name.Name, f.Name.Package, f.Name.Name)) - // } - manualMap[key] = f - } else { - // prevent user error when they don't get the correct conversion signature - if strings.HasPrefix(f.Name.Name, "Convert_") { - klog.Errorf("Rename function %s %s -> %s to match expected conversion signature", f.Name.Package, f.Name.Name, buffer.String()) - } - klog.V(3).Infof("%s has wrong name", f.Name) - } - buffer.Reset() - } -} - -func GetTargets(context *generator.Context, args *args.Args) []generator.Target { - boilerplate, err := gengo.GoBoilerplate(args.GoHeaderFile, gengo.StdBuildTag, gengo.StdGeneratedBy) - if err != nil { - klog.Fatalf("Failed loading boilerplate: %v", err) - } - - targets := []generator.Target{} - - // Accumulate pre-existing conversion functions. - // TODO: This is too ad-hoc. We need a better way. - manualConversions := conversionFuncMap{} - - // Record types that are memory equivalent. A type is memory equivalent - // if it has the same memory layout and no nested manual conversion is - // defined. - // TODO: in the future, relax the nested manual conversion requirement - // if we can show that a large enough types are memory identical but - // have non-trivial conversion - memoryEquivalentTypes := equalMemoryTypes{} - - // First load other "input" packages. We do this as a single call because - // it is MUCH faster. - filteredInputs := make([]string, 0, len(context.Inputs)) - otherPkgs := make([]string, 0, len(context.Inputs)) - pkgToPeers := map[string][]string{} - pkgToExternal := map[string]string{} - for _, i := range context.Inputs { - klog.V(3).Infof("pre-processing pkg %q", i) - - pkg := context.Universe[i] - - // Only generate conversions for packages which explicitly request it - // by specifying one or more "+k8s:conversion-gen=" - // in their doc.go file. - peerPkgs := extractTag(pkg.Comments) - if peerPkgs == nil { - klog.V(3).Infof(" no tag") - continue - } - klog.V(3).Infof(" tags: %q", peerPkgs) - if len(peerPkgs) == 1 && peerPkgs[0] == "false" { - // If a single +k8s:conversion-gen=false tag is defined, we still want - // the generator to fire for this package for explicit conversions, but - // we are clearing the peerPkgs to not generate any standard conversions. - peerPkgs = nil - } else { - // Save peers for each input - pkgToPeers[i] = peerPkgs - } - otherPkgs = append(otherPkgs, peerPkgs...) - // Keep this one for further processing. 
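The peer-package wiring above is driven entirely by comment tags in each input package's `doc.go`. A hedged sketch of what such a file looks like (the import paths are hypothetical, not this repository's):

```go
// Package v1beta1 contains an older version of the API.
//
// Request conversion generation against the peer package holding the newer types:
// +k8s:conversion-gen=example.com/project/api/v1beta2
//
// Optionally, point the generator at a different package that defines the
// external Go types (otherwise they are assumed to live in this package):
// +k8s:conversion-gen-external-types=example.com/project/apis/v1beta1
package v1beta1
```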
- filteredInputs = append(filteredInputs, i) - - // if the external types are not in the same package where the - // conversion functions to be generated - externalTypesValues := extractExternalTypesTag(pkg.Comments) - if externalTypesValues != nil { - if len(externalTypesValues) != 1 { - klog.Fatalf(" expect only one value for %q tag, got: %q", externalTypesTagName, externalTypesValues) - } - externalTypes := externalTypesValues[0] - klog.V(3).Infof(" external types tags: %q", externalTypes) - otherPkgs = append(otherPkgs, externalTypes) - pkgToExternal[i] = externalTypes - } else { - pkgToExternal[i] = i - } - } - - // Make sure explicit peer-packages are added. - peers := args.BasePeerDirs - peers = append(peers, args.ExtraPeerDirs...) - if expanded, err := context.FindPackages(peers...); err != nil { - klog.Fatalf("cannot find peer packages: %v", err) - } else { - otherPkgs = append(otherPkgs, expanded...) - // for each pkg, add these extras, too - for k := range pkgToPeers { - pkgToPeers[k] = append(pkgToPeers[k], expanded...) - } - } - - if len(otherPkgs) > 0 { - if _, err := context.LoadPackages(otherPkgs...); err != nil { - klog.Fatalf("cannot load packages: %v", err) - } - } - // update context.Order to the latest context.Universe - orderer := namer.Orderer{Namer: namer.NewPublicNamer(1)} - context.Order = orderer.OrderUniverse(context.Universe) - - // Look for conversion functions in the peer-packages. - for _, pp := range otherPkgs { - p := context.Universe[pp] - if p == nil { - klog.Fatalf("failed to find pkg: %s", pp) - } - getManualConversionFunctions(context, p, manualConversions) - } - - // We are generating conversions only for packages that are explicitly - // passed as InputDir. - for _, i := range filteredInputs { - klog.V(3).Infof("considering pkg %q", i) - pkg := context.Universe[i] - // typesPkg is where the versioned types are defined. Sometimes it is - // different from pkg. For example, kubernetes core/v1 types are defined - // in k8s.io/api/core/v1, while pkg is at pkg/api/v1. - typesPkg := pkg - - // Add conversion and defaulting functions. - getManualConversionFunctions(context, pkg, manualConversions) - - // Find the right input pkg, which might not be this one. - externalTypes := pkgToExternal[i] - typesPkg = context.Universe[externalTypes] - - unsafeEquality := TypesEqual(memoryEquivalentTypes) - if args.SkipUnsafe { - unsafeEquality = noEquality{} - } - - targets = append(targets, - &generator.SimpleTarget{ - PkgName: path.Base(pkg.Path), - PkgPath: pkg.Path, - PkgDir: pkg.Dir, // output pkg is the same as the input - HeaderComment: boilerplate, - FilterFunc: func(c *generator.Context, t *types.Type) bool { - return t.Name.Package == typesPkg.Path - }, - GeneratorsFunc: func(c *generator.Context) (generators []generator.Generator) { - return []generator.Generator{ - NewGenConversion(args.OutputFile, typesPkg.Path, pkg.Path, manualConversions, pkgToPeers[pkg.Path], unsafeEquality), - } - }, - }) - } - - // If there is a manual conversion defined between two types, exclude it - // from being a candidate for unsafe conversion - for k, v := range manualConversions { - if isCopyOnly(v.CommentLines) { - klog.V(4).Infof("Conversion function %s will not block memory copy because it is copy-only", v.Name) - continue - } - // this type should be excluded from all equivalence, because the converter must be called. 
- memoryEquivalentTypes.Skip(k.inType, k.outType) - } - - return targets -} - -type equalMemoryTypes map[conversionPair]bool - -func (e equalMemoryTypes) Skip(a, b *types.Type) { - e[conversionPair{a, b}] = false - e[conversionPair{b, a}] = false -} - -func (e equalMemoryTypes) Equal(a, b *types.Type) bool { - // alreadyVisitedTypes holds all the types that have already been checked in the structural type recursion. - alreadyVisitedTypes := make(map[*types.Type]bool) - return e.cachingEqual(a, b, alreadyVisitedTypes) -} - -func (e equalMemoryTypes) cachingEqual(a, b *types.Type, alreadyVisitedTypes map[*types.Type]bool) bool { - if a == b { - return true - } - if equal, ok := e[conversionPair{a, b}]; ok { - return equal - } - if equal, ok := e[conversionPair{b, a}]; ok { - return equal - } - result := e.equal(a, b, alreadyVisitedTypes) - e[conversionPair{a, b}] = result - e[conversionPair{b, a}] = result - return result -} - -func (e equalMemoryTypes) equal(a, b *types.Type, alreadyVisitedTypes map[*types.Type]bool) bool { - in, out := unwrapAlias(a), unwrapAlias(b) - switch { - case in == out: - return true - case in.Kind == out.Kind: - // if the type exists already, return early to avoid recursion - if alreadyVisitedTypes[in] { - return true - } - alreadyVisitedTypes[in] = true - - switch in.Kind { - case types.Struct: - if len(in.Members) != len(out.Members) { - return false - } - for i, inMember := range in.Members { - outMember := out.Members[i] - if !e.cachingEqual(inMember.Type, outMember.Type, alreadyVisitedTypes) { - return false - } - } - return true - case types.Pointer: - return e.cachingEqual(in.Elem, out.Elem, alreadyVisitedTypes) - case types.Map: - return e.cachingEqual(in.Key, out.Key, alreadyVisitedTypes) && e.cachingEqual(in.Elem, out.Elem, alreadyVisitedTypes) - case types.Slice: - return e.cachingEqual(in.Elem, out.Elem, alreadyVisitedTypes) - case types.Interface: - // TODO: determine whether the interfaces are actually equivalent - for now, they must have the - // same type. - return false - case types.Builtin: - return in.Name.Name == out.Name.Name - } - } - return false -} - -func findMember(t *types.Type, name string) (types.Member, bool) { - if t.Kind != types.Struct { - return types.Member{}, false - } - for _, member := range t.Members { - if member.Name == name { - return member, true - } - } - return types.Member{}, false -} - -// unwrapAlias recurses down aliased types to find the bedrock type. -func unwrapAlias(in *types.Type) *types.Type { - for in.Kind == types.Alias { - in = in.Underlying - } - return in -} - -const ( - runtimePackagePath = "k8s.io/apimachinery/pkg/runtime" - conversionPackagePath = "k8s.io/apimachinery/pkg/conversion" -) - -type noEquality struct{} - -func (noEquality) Equal(_, _ *types.Type) bool { return false } - -type TypesEqual interface { - Equal(a, b *types.Type) bool -} - -// genConversion produces a file with a autogenerated conversions. 
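This memory-equivalence bookkeeping is what lets the generator replace deep copies with zero-copy casts. A minimal, runnable illustration of the technique (hypothetical named types; real generated code casts between struct types across API packages):

```go
package main

import (
	"fmt"
	"unsafe"
)

// Two named types with identical underlying memory layouts, standing in for
// the same field type in two API versions.
type TagsV1 map[string]string
type TagsV2 map[string]string

func main() {
	in := TagsV1{"env": "prod"}
	// When equalMemoryTypes reports the pair as equivalent, the generated
	// conversion reinterprets the memory instead of copying element by element:
	out := *(*TagsV2)(unsafe.Pointer(&in))
	fmt.Println(out["env"]) // prints: prod
}
```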
-type genConversion struct { - generator.GoGenerator - // the package that contains the types that conversion func are going to be - // generated for - typesPackage string - // the package that the conversion funcs are going to be output to - outputPackage string - // packages that contain the peer of types in typesPacakge - peerPackages []string - manualConversions conversionFuncMap - imports namer.ImportTracker - types []*types.Type - explicitConversions []conversionPair - skippedFields map[*types.Type][]string - useUnsafe TypesEqual -} - -func NewGenConversion(outputFilename, typesPackage, outputPackage string, manualConversions conversionFuncMap, peerPkgs []string, useUnsafe TypesEqual) generator.Generator { - return &genConversion{ - GoGenerator: generator.GoGenerator{ - OutputFilename: outputFilename, - }, - typesPackage: typesPackage, - outputPackage: outputPackage, - peerPackages: peerPkgs, - manualConversions: manualConversions, - imports: generator.NewImportTracker(), - types: []*types.Type{}, - explicitConversions: []conversionPair{}, - skippedFields: map[*types.Type][]string{}, - useUnsafe: useUnsafe, - } -} - -func (g *genConversion) Namers(c *generator.Context) namer.NameSystems { - // Have the raw namer for this file track what it imports. - return namer.NameSystems{ - "raw": namer.NewRawNamer(g.outputPackage, g.imports), - "publicIT": &namerPlusImportTracking{ - delegate: conversionNamer(), - tracker: g.imports, - }, - } -} - -type namerPlusImportTracking struct { - delegate namer.Namer - tracker namer.ImportTracker -} - -func (n *namerPlusImportTracking) Name(t *types.Type) string { - n.tracker.AddType(t) - return n.delegate.Name(t) -} - -func (g *genConversion) convertibleOnlyWithinPackage(inType, outType *types.Type) bool { - var t *types.Type - var other *types.Type - if inType.Name.Package == g.typesPackage { - t, other = inType, outType - } else { - t, other = outType, inType - } - - if t.Name.Package != g.typesPackage { - return false - } - // If the type has opted out, skip it. - tagvals := extractTag(t.CommentLines) - if tagvals != nil { - if tagvals[0] != "false" { - klog.Fatalf("Type %v: unsupported %s value: %q", t, tagName, tagvals[0]) - } - klog.V(2).Infof("type %v requests no conversion generation, skipping", t) - return false - } - // TODO: Consider generating functions for other kinds too. - if t.Kind != types.Struct { - return false - } - // Also, filter out private types. - if namer.IsPrivateGoName(other.Name.Name) { - return false - } - return true -} - -func getExplicitFromTypes(t *types.Type) []types.Name { - comments := t.SecondClosestCommentLines - comments = append(comments, t.CommentLines...) 
- paths := extractExplicitFromTag(comments) - result := []types.Name{} - for _, path := range paths { - items := strings.Split(path, ".") - if len(items) != 2 { - klog.Errorf("Unexpected k8s:conversion-gen:explicit-from tag: %s", path) - continue - } - switch { - case items[0] == "net/url" && items[1] == "Values": - default: - klog.Fatalf("Not supported k8s:conversion-gen:explicit-from tag: %s", path) - } - result = append(result, types.Name{Package: items[0], Name: items[1]}) - } - return result -} - -func (g *genConversion) Filter(c *generator.Context, t *types.Type) bool { - convertibleWithPeer := func() bool { - peerType := getPeerTypeFor(c, t, g.peerPackages) - if peerType == nil { - return false - } - if !g.convertibleOnlyWithinPackage(t, peerType) { - return false - } - g.types = append(g.types, t) - return true - }() - - explicitlyConvertible := func() bool { - inTypes := getExplicitFromTypes(t) - if len(inTypes) == 0 { - return false - } - for i := range inTypes { - pair := conversionPair{ - inType: &types.Type{Name: inTypes[i]}, - outType: t, - } - g.explicitConversions = append(g.explicitConversions, pair) - } - return true - }() - - return convertibleWithPeer || explicitlyConvertible -} - -func (g *genConversion) isOtherPackage(pkg string) bool { - if pkg == g.outputPackage { - return false - } - if strings.HasSuffix(pkg, `"`+g.outputPackage+`"`) { - return false - } - return true -} - -func (g *genConversion) Imports(c *generator.Context) (imports []string) { - var importLines []string - for _, singleImport := range g.imports.ImportLines() { - if g.isOtherPackage(singleImport) { - importLines = append(importLines, singleImport) - } - } - return importLines -} - -func argsFromType(inType, outType *types.Type) generator.Args { - return generator.Args{ - "inType": inType, - "outType": outType, - } -} - -const nameTmpl = "Convert_$.inType|publicIT$_To_$.outType|publicIT$" - -func (g *genConversion) preexists(inType, outType *types.Type) (*types.Type, bool) { - function, ok := g.manualConversions[conversionPair{inType, outType}] - return function, ok -} - -func (g *genConversion) Init(c *generator.Context, w io.Writer) error { - klogV := klog.V(6) - if klogV.Enabled() { - if m, ok := g.useUnsafe.(equalMemoryTypes); ok { - var result []string - klogV.Info("All objects without identical memory layout:") - for k, v := range m { - if v { - continue - } - result = append(result, fmt.Sprintf(" %s -> %s = %t", k.inType, k.outType, v)) - } - sort.Strings(result) - for _, s := range result { - klogV.Info(s) - } - } - } - sw := generator.NewSnippetWriter(w, c, "$", "$") - sw.Do("func init() {\n", nil) - sw.Do("localSchemeBuilder.Register(RegisterConversions)\n", nil) - sw.Do("}\n", nil) - - scheme := c.Universe.Type(types.Name{Package: runtimePackagePath, Name: "Scheme"}) - schemePtr := &types.Type{ - Kind: types.Pointer, - Elem: scheme, - } - sw.Do("// RegisterConversions adds conversion functions to the given scheme.\n", nil) - sw.Do("// Public to allow building arbitrary schemes.\n", nil) - sw.Do("func RegisterConversions(s $.|raw$) error {\n", schemePtr) - for _, t := range g.types { - peerType := getPeerTypeFor(c, t, g.peerPackages) - if _, found := g.preexists(t, peerType); !found { - args := argsFromType(t, peerType).With("Scope", types.Ref(conversionPackagePath, "Scope")) - sw.Do("if err := s.AddGeneratedConversionFunc((*$.inType|raw$)(nil), (*$.outType|raw$)(nil), func(a, b interface{}, scope $.Scope|raw$) error { return "+nameTmpl+"(a.(*$.inType|raw$), b.(*$.outType|raw$), scope) 
}); err != nil { return err }\n", args) - } - if _, found := g.preexists(peerType, t); !found { - args := argsFromType(peerType, t).With("Scope", types.Ref(conversionPackagePath, "Scope")) - sw.Do("if err := s.AddGeneratedConversionFunc((*$.inType|raw$)(nil), (*$.outType|raw$)(nil), func(a, b interface{}, scope $.Scope|raw$) error { return "+nameTmpl+"(a.(*$.inType|raw$), b.(*$.outType|raw$), scope) }); err != nil { return err }\n", args) - } - } - - for i := range g.explicitConversions { - args := argsFromType(g.explicitConversions[i].inType, g.explicitConversions[i].outType).With("Scope", types.Ref(conversionPackagePath, "Scope")) - sw.Do("if err := s.AddGeneratedConversionFunc((*$.inType|raw$)(nil), (*$.outType|raw$)(nil), func(a, b interface{}, scope $.Scope|raw$) error { return "+nameTmpl+"(a.(*$.inType|raw$), b.(*$.outType|raw$), scope) }); err != nil { return err }\n", args) - } - - var pairs []conversionPair - for pair, t := range g.manualConversions { - if t.Name.Package != g.outputPackage { - continue - } - pairs = append(pairs, pair) - } - // sort by name of the conversion function - sort.Slice(pairs, func(i, j int) bool { - return g.manualConversions[pairs[i]].Name.Name < g.manualConversions[pairs[j]].Name.Name - }) - for _, pair := range pairs { - args := argsFromType(pair.inType, pair.outType).With("Scope", types.Ref(conversionPackagePath, "Scope")).With("fn", g.manualConversions[pair]) - sw.Do("if err := s.AddConversionFunc((*$.inType|raw$)(nil), (*$.outType|raw$)(nil), func(a, b interface{}, scope $.Scope|raw$) error { return $.fn|raw$(a.(*$.inType|raw$), b.(*$.outType|raw$), scope) }); err != nil { return err }\n", args) - } - - sw.Do("return nil\n", nil) - sw.Do("}\n\n", nil) - return sw.Error() -} - -func (g *genConversion) GenerateType(c *generator.Context, t *types.Type, w io.Writer) error { - klog.V(5).Infof("generating for type %v", t) - sw := generator.NewSnippetWriter(w, c, "$", "$") - - if peerType := getPeerTypeFor(c, t, g.peerPackages); peerType != nil { - g.generateConversion(t, peerType, sw) - g.generateConversion(peerType, t, sw) - } - - for _, inTypeName := range getExplicitFromTypes(t) { - inPkg, ok := c.Universe[inTypeName.Package] - if !ok { - klog.Errorf("Unrecognized package: %s", inTypeName.Package) - continue - } - inType, ok := inPkg.Types[inTypeName.Name] - if !ok { - klog.Errorf("Unrecognized type in package %s: %s", inTypeName.Package, inTypeName.Name) - continue - } - switch { - case inType.Name.Package == "net/url" && inType.Name.Name == "Values": - g.generateFromURLValues(inType, t, sw) - default: - klog.Errorf("Not supported input type: %#v", inType.Name) - } - } - - return sw.Error() -} - -func (g *genConversion) generateConversion(inType, outType *types.Type, sw *generator.SnippetWriter) { - args := argsFromType(inType, outType). - With("Scope", types.Ref(conversionPackagePath, "Scope")) - - sw.Do("func auto"+nameTmpl+"(in *$.inType|raw$, out *$.outType|raw$, s $.Scope|raw$) error {\n", args) - g.generateFor(inType, outType, sw) - sw.Do("return nil\n", nil) - sw.Do("}\n\n", nil) - - if _, found := g.preexists(inType, outType); found { - // There is a public manual Conversion method: use it. - } else if skipped := g.skippedFields[inType]; len(skipped) != 0 { - // The inType had some fields we could not generate. 
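For orientation, the `Init` method above writes registration code of roughly this shape into each generated file (sketch only; the `Widget` types, `localSchemeBuilder`, and imports are assumed from the surrounding generated package):

```go
func init() {
	localSchemeBuilder.Register(RegisterConversions)
}

// RegisterConversions adds conversion functions to the given scheme.
// Public to allow building arbitrary schemes.
func RegisterConversions(s *runtime.Scheme) error {
	if err := s.AddGeneratedConversionFunc((*WidgetV1Beta1)(nil), (*WidgetV1Beta2)(nil), func(a, b interface{}, scope conversion.Scope) error {
		return Convert_v1beta1_Widget_To_v1beta2_Widget(a.(*WidgetV1Beta1), b.(*WidgetV1Beta2), scope)
	}); err != nil {
		return err
	}
	return nil
}
```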
- klog.Errorf("Warning: could not find nor generate a final Conversion function for %v -> %v", inType, outType) - klog.Errorf(" the following fields need manual conversion:") - for _, f := range skipped { - klog.Errorf(" - %v", f) - } - } else { - // Emit a public conversion function. - sw.Do("// "+nameTmpl+" is an autogenerated conversion function.\n", args) - sw.Do("func "+nameTmpl+"(in *$.inType|raw$, out *$.outType|raw$, s $.Scope|raw$) error {\n", args) - sw.Do("return auto"+nameTmpl+"(in, out, s)\n", args) - sw.Do("}\n\n", nil) - } -} - -// we use the system of shadowing 'in' and 'out' so that the same code is valid -// at any nesting level. This makes the autogenerator easy to understand, and -// the compiler shouldn't care. -func (g *genConversion) generateFor(inType, outType *types.Type, sw *generator.SnippetWriter) { - klog.V(4).Infof("generating %v -> %v", inType, outType) - var f func(*types.Type, *types.Type, *generator.SnippetWriter) - - switch inType.Kind { - case types.Builtin: - f = g.doBuiltin - case types.Map: - f = g.doMap - case types.Slice: - f = g.doSlice - case types.Struct: - f = g.doStruct - case types.Pointer: - f = g.doPointer - case types.Alias: - f = g.doAlias - default: - f = g.doUnknown - } - - f(inType, outType, sw) -} - -func (g *genConversion) doBuiltin(inType, outType *types.Type, sw *generator.SnippetWriter) { - if inType == outType { - sw.Do("*out = *in\n", nil) - } else { - sw.Do("*out = $.|raw$(*in)\n", outType) - } -} - -func (g *genConversion) doMap(inType, outType *types.Type, sw *generator.SnippetWriter) { - sw.Do("*out = make($.|raw$, len(*in))\n", outType) - if isDirectlyAssignable(inType.Key, outType.Key) { - sw.Do("for key, val := range *in {\n", nil) - if isDirectlyAssignable(inType.Elem, outType.Elem) { - if inType.Key == outType.Key { - sw.Do("(*out)[key] = ", nil) - } else { - sw.Do("(*out)[$.|raw$(key)] = ", outType.Key) - } - if inType.Elem == outType.Elem { - sw.Do("val\n", nil) - } else { - sw.Do("$.|raw$(val)\n", outType.Elem) - } - } else { - conversionExists := true - if function, ok := g.preexists(inType.Elem, outType.Elem); ok { - sw.Do("newVal := new($.|raw$)\n", outType.Elem) - sw.Do("if err := $.|raw$(&val, newVal, s); err != nil {\n", function) - } else if g.convertibleOnlyWithinPackage(inType.Elem, outType.Elem) { - sw.Do("newVal := new($.|raw$)\n", outType.Elem) - sw.Do("if err := "+nameTmpl+"(&val, newVal, s); err != nil {\n", argsFromType(inType.Elem, outType.Elem)) - } else { - args := argsFromType(inType.Elem, outType.Elem) - sw.Do("// FIXME: Provide conversion function to convert $.inType|raw$ to $.outType|raw$\n", args) - sw.Do("compileErrorOnMissingConversion()\n", nil) - conversionExists = false - } - if conversionExists { - sw.Do("return err\n", nil) - sw.Do("}\n", nil) - if inType.Key == outType.Key { - sw.Do("(*out)[key] = *newVal\n", nil) - } else { - sw.Do("(*out)[$.|raw$(key)] = *newVal\n", outType.Key) - } - } - } - } else { - // TODO: Implement it when necessary. 
- sw.Do("for range *in {\n", nil) - sw.Do("// FIXME: Converting unassignable keys unsupported $.|raw$\n", inType.Key) - } - sw.Do("}\n", nil) -} - -func (g *genConversion) doSlice(inType, outType *types.Type, sw *generator.SnippetWriter) { - sw.Do("*out = make($.|raw$, len(*in))\n", outType) - if inType.Elem == outType.Elem && inType.Elem.Kind == types.Builtin { - sw.Do("copy(*out, *in)\n", nil) - } else { - sw.Do("for i := range *in {\n", nil) - if isDirectlyAssignable(inType.Elem, outType.Elem) { - if inType.Elem == outType.Elem { - sw.Do("(*out)[i] = (*in)[i]\n", nil) - } else { - sw.Do("(*out)[i] = $.|raw$((*in)[i])\n", outType.Elem) - } - } else { - conversionExists := true - if function, ok := g.preexists(inType.Elem, outType.Elem); ok { - sw.Do("if err := $.|raw$(&(*in)[i], &(*out)[i], s); err != nil {\n", function) - } else if g.convertibleOnlyWithinPackage(inType.Elem, outType.Elem) { - sw.Do("if err := "+nameTmpl+"(&(*in)[i], &(*out)[i], s); err != nil {\n", argsFromType(inType.Elem, outType.Elem)) - } else { - args := argsFromType(inType.Elem, outType.Elem) - sw.Do("// FIXME: Provide conversion function to convert $.inType|raw$ to $.outType|raw$\n", args) - sw.Do("compileErrorOnMissingConversion()\n", nil) - conversionExists = false - } - if conversionExists { - sw.Do("return err\n", nil) - sw.Do("}\n", nil) - } - } - sw.Do("}\n", nil) - } -} - -func (g *genConversion) doStruct(inType, outType *types.Type, sw *generator.SnippetWriter) { - for _, inMember := range inType.Members { - if tagvals := extractTag(inMember.CommentLines); tagvals != nil && tagvals[0] == "false" { - // This field is excluded from conversion. - sw.Do("// INFO: in."+inMember.Name+" opted out of conversion generation\n", nil) - continue - } - outMember, found := findMember(outType, inMember.Name) - if !found { - // This field doesn't exist in the peer. - sw.Do("// WARNING: in."+inMember.Name+" requires manual conversion: does not exist in peer-type\n", nil) - g.skippedFields[inType] = append(g.skippedFields[inType], inMember.Name) - continue - } - - inMemberType, outMemberType := inMember.Type, outMember.Type - // create a copy of both underlying types but give them the top level alias name (since aliases - // are assignable) - if underlying := unwrapAlias(inMemberType); underlying != inMemberType { - copied := *underlying - copied.Name = inMemberType.Name - inMemberType = &copied - } - if underlying := unwrapAlias(outMemberType); underlying != outMemberType { - copied := *underlying - copied.Name = outMemberType.Name - outMemberType = &copied - } - - args := argsFromType(inMemberType, outMemberType).With("name", inMember.Name) - - // try a direct memory copy for any type that has exactly equivalent values - if g.useUnsafe.Equal(inMemberType, outMemberType) { - args = args. - With("Pointer", types.Ref("unsafe", "Pointer")). 
- With("SliceHeader", types.Ref("reflect", "SliceHeader")) - switch inMemberType.Kind { - case types.Pointer: - sw.Do("out.$.name$ = ($.outType|raw$)($.Pointer|raw$(in.$.name$))\n", args) - continue - case types.Map: - sw.Do("out.$.name$ = *(*$.outType|raw$)($.Pointer|raw$(&in.$.name$))\n", args) - continue - case types.Slice: - sw.Do("out.$.name$ = *(*$.outType|raw$)($.Pointer|raw$(&in.$.name$))\n", args) - continue - } - } - - // check based on the top level name, not the underlying names - if function, ok := g.preexists(inMember.Type, outMember.Type); ok { - if isDrop(function.CommentLines) { - continue - } - // copy-only functions that are directly assignable can be inlined instead of invoked. - // As an example, conversion functions exist that allow types with private fields to be - // correctly copied between types. These functions are equivalent to a memory assignment, - // and are necessary for the reflection path, but should not block memory conversion. - // Convert_unversioned_Time_to_unversioned_Time is an example of this logic. - if !isCopyOnly(function.CommentLines) || !g.isFastConversion(inMemberType, outMemberType) { - args["function"] = function - sw.Do("if err := $.function|raw$(&in.$.name$, &out.$.name$, s); err != nil {\n", args) - sw.Do("return err\n", nil) - sw.Do("}\n", nil) - continue - } - klog.V(2).Infof("Skipped function %s because it is copy-only and we can use direct assignment", function.Name) - } - - // If we can't auto-convert, punt before we emit any code. - if inMemberType.Kind != outMemberType.Kind { - sw.Do("// WARNING: in."+inMember.Name+" requires manual conversion: inconvertible types ("+ - inMemberType.String()+" vs "+outMemberType.String()+")\n", nil) - g.skippedFields[inType] = append(g.skippedFields[inType], inMember.Name) - continue - } - - switch inMemberType.Kind { - case types.Builtin: - if inMemberType == outMemberType { - sw.Do("out.$.name$ = in.$.name$\n", args) - } else { - sw.Do("out.$.name$ = $.outType|raw$(in.$.name$)\n", args) - } - case types.Map, types.Slice, types.Pointer: - if g.isDirectlyAssignable(inMemberType, outMemberType) { - sw.Do("out.$.name$ = in.$.name$\n", args) - continue - } - - sw.Do("if in.$.name$ != nil {\n", args) - sw.Do("in, out := &in.$.name$, &out.$.name$\n", args) - g.generateFor(inMemberType, outMemberType, sw) - sw.Do("} else {\n", nil) - sw.Do("out.$.name$ = nil\n", args) - sw.Do("}\n", nil) - case types.Struct: - if g.isDirectlyAssignable(inMemberType, outMemberType) { - sw.Do("out.$.name$ = in.$.name$\n", args) - continue - } - conversionExists := true - if g.convertibleOnlyWithinPackage(inMemberType, outMemberType) { - sw.Do("if err := "+nameTmpl+"(&in.$.name$, &out.$.name$, s); err != nil {\n", args) - } else { - args := argsFromType(inMemberType, outMemberType) - sw.Do("// FIXME: Provide conversion function to convert $.inType|raw$ to $.outType|raw$\n", args) - sw.Do("compileErrorOnMissingConversion()\n", nil) - conversionExists = false - } - if conversionExists { - sw.Do("return err\n", nil) - sw.Do("}\n", nil) - } - case types.Alias: - if isDirectlyAssignable(inMemberType, outMemberType) { - if inMemberType == outMemberType { - sw.Do("out.$.name$ = in.$.name$\n", args) - } else { - sw.Do("out.$.name$ = $.outType|raw$(in.$.name$)\n", args) - } - } else { - conversionExists := true - if g.convertibleOnlyWithinPackage(inMemberType, outMemberType) { - sw.Do("if err := "+nameTmpl+"(&in.$.name$, &out.$.name$, s); err != nil {\n", args) - } else { - args := argsFromType(inMemberType, outMemberType) - 
sw.Do("// FIXME: Provide conversion function to convert $.inType|raw$ to $.outType|raw$\n", args) - sw.Do("compileErrorOnMissingConversion()\n", nil) - conversionExists = false - } - if conversionExists { - sw.Do("return err\n", nil) - sw.Do("}\n", nil) - } - } - default: - conversionExists := true - if g.convertibleOnlyWithinPackage(inMemberType, outMemberType) { - sw.Do("if err := "+nameTmpl+"(&in.$.name$, &out.$.name$, s); err != nil {\n", args) - } else { - args := argsFromType(inMemberType, outMemberType) - sw.Do("// FIXME: Provide conversion function to convert $.inType|raw$ to $.outType|raw$\n", args) - sw.Do("compileErrorOnMissingConversion()\n", nil) - conversionExists = false - } - if conversionExists { - sw.Do("return err\n", nil) - sw.Do("}\n", nil) - } - } - } -} - -func (g *genConversion) isFastConversion(inType, outType *types.Type) bool { - switch inType.Kind { - case types.Builtin: - return true - case types.Map, types.Slice, types.Pointer, types.Struct, types.Alias: - return g.isDirectlyAssignable(inType, outType) - default: - return false - } -} - -func (g *genConversion) isDirectlyAssignable(inType, outType *types.Type) bool { - return unwrapAlias(inType) == unwrapAlias(outType) -} - -func (g *genConversion) doPointer(inType, outType *types.Type, sw *generator.SnippetWriter) { - sw.Do("*out = new($.Elem|raw$)\n", outType) - if isDirectlyAssignable(inType.Elem, outType.Elem) { - if inType.Elem == outType.Elem { - sw.Do("**out = **in\n", nil) - } else { - sw.Do("**out = $.|raw$(**in)\n", outType.Elem) - } - } else { - conversionExists := true - if function, ok := g.preexists(inType.Elem, outType.Elem); ok { - sw.Do("if err := $.|raw$(*in, *out, s); err != nil {\n", function) - } else if g.convertibleOnlyWithinPackage(inType.Elem, outType.Elem) { - sw.Do("if err := "+nameTmpl+"(*in, *out, s); err != nil {\n", argsFromType(inType.Elem, outType.Elem)) - } else { - args := argsFromType(inType.Elem, outType.Elem) - sw.Do("// FIXME: Provide conversion function to convert $.inType|raw$ to $.outType|raw$\n", args) - sw.Do("compileErrorOnMissingConversion()\n", nil) - conversionExists = false - } - if conversionExists { - sw.Do("return err\n", nil) - sw.Do("}\n", nil) - } - } -} - -func (g *genConversion) doAlias(inType, outType *types.Type, sw *generator.SnippetWriter) { - // TODO: Add support for aliases. - g.doUnknown(inType, outType, sw) -} - -func (g *genConversion) doUnknown(inType, outType *types.Type, sw *generator.SnippetWriter) { - sw.Do("// FIXME: Type $.|raw$ is unsupported.\n", inType) -} - -func (g *genConversion) generateFromURLValues(inType, outType *types.Type, sw *generator.SnippetWriter) { - args := generator.Args{ - "inType": inType, - "outType": outType, - "Scope": types.Ref(conversionPackagePath, "Scope"), - } - sw.Do("func auto"+nameTmpl+"(in *$.inType|raw$, out *$.outType|raw$, s $.Scope|raw$) error {\n", args) - for _, outMember := range outType.Members { - if tagvals := extractTag(outMember.CommentLines); tagvals != nil && tagvals[0] == "false" { - // This field is excluded from conversion. 
- sw.Do("// INFO: in."+outMember.Name+" opted out of conversion generation\n", nil) - continue - } - jsonTag := reflect.StructTag(outMember.Tags).Get("json") - index := strings.Index(jsonTag, ",") - if index == -1 { - index = len(jsonTag) - } - if index == 0 { - memberArgs := generator.Args{ - "name": outMember.Name, - } - sw.Do("// WARNING: Field $.name$ does not have json tag, skipping.\n\n", memberArgs) - continue - } - memberArgs := generator.Args{ - "name": outMember.Name, - "tag": jsonTag[:index], - } - sw.Do("if values, ok := map[string][]string(*in)[\"$.tag$\"]; ok && len(values) > 0 {\n", memberArgs) - g.fromValuesEntry(inType.Underlying.Elem, outMember, sw) - sw.Do("} else {\n", nil) - g.setZeroValue(outMember, sw) - sw.Do("}\n", nil) - } - sw.Do("return nil\n", nil) - sw.Do("}\n\n", nil) - - if _, found := g.preexists(inType, outType); found { - // There is a public manual Conversion method: use it. - } else { - // Emit a public conversion function. - sw.Do("// "+nameTmpl+" is an autogenerated conversion function.\n", args) - sw.Do("func "+nameTmpl+"(in *$.inType|raw$, out *$.outType|raw$, s $.Scope|raw$) error {\n", args) - sw.Do("return auto"+nameTmpl+"(in, out, s)\n", args) - sw.Do("}\n\n", nil) - } -} - -func (g *genConversion) fromValuesEntry(inType *types.Type, outMember types.Member, sw *generator.SnippetWriter) { - memberArgs := generator.Args{ - "name": outMember.Name, - "type": outMember.Type, - } - if function, ok := g.preexists(inType, outMember.Type); ok { - args := memberArgs.With("function", function) - sw.Do("if err := $.function|raw$(&values, &out.$.name$, s); err != nil {\n", args) - sw.Do("return err\n", nil) - sw.Do("}\n", nil) - return - } - switch { - case outMember.Type == types.String: - sw.Do("out.$.name$ = values[0]\n", memberArgs) - case g.useUnsafe.Equal(inType, outMember.Type): - args := memberArgs.With("Pointer", types.Ref("unsafe", "Pointer")) - switch inType.Kind { - case types.Pointer: - sw.Do("out.$.name$ = ($.type|raw$)($.Pointer|raw$(&values))\n", args) - case types.Map, types.Slice: - sw.Do("out.$.name$ = *(*$.type|raw$)($.Pointer|raw$(&values))\n", args) - default: - // TODO: Support other types to allow more auto-conversions. - sw.Do("// FIXME: out.$.name$ is of not yet supported type and requires manual conversion\n", memberArgs) - } - default: - // TODO: Support other types to allow more auto-conversions. 
- sw.Do("// FIXME: out.$.name$ is of not yet supported type and requires manual conversion\n", memberArgs) - } -} - -func (g *genConversion) setZeroValue(outMember types.Member, sw *generator.SnippetWriter) { - outMemberType := unwrapAlias(outMember.Type) - memberArgs := generator.Args{ - "name": outMember.Name, - "alias": outMember.Type, - "type": outMemberType, - } - - switch outMemberType.Kind { - case types.Builtin: - switch outMemberType { - case types.String: - sw.Do("out.$.name$ = \"\"\n", memberArgs) - case types.Int64, types.Int32, types.Int16, types.Int, types.Uint64, types.Uint32, types.Uint16, types.Uint: - sw.Do("out.$.name$ = 0\n", memberArgs) - case types.Uintptr, types.Byte: - sw.Do("out.$.name$ = 0\n", memberArgs) - case types.Float64, types.Float32, types.Float: - sw.Do("out.$.name$ = 0\n", memberArgs) - case types.Bool: - sw.Do("out.$.name$ = false\n", memberArgs) - default: - sw.Do("// FIXME: out.$.name$ is of unsupported type and requires manual conversion\n", memberArgs) - } - case types.Struct: - if outMemberType == outMember.Type { - sw.Do("out.$.name$ = $.type|raw${}\n", memberArgs) - } else { - sw.Do("out.$.name$ = $.alias|raw$($.type|raw${})\n", memberArgs) - } - case types.Map, types.Slice, types.Pointer: - sw.Do("out.$.name$ = nil\n", memberArgs) - case types.Alias: - // outMemberType was already unwrapped from aliases - so that should never happen. - sw.Do("// FIXME: unexpected error for out.$.name$\n", memberArgs) - case types.Interface, types.Array: - sw.Do("out.$.name$ = nil\n", memberArgs) - default: - sw.Do("// FIXME: out.$.name$ is of unsupported type and requires manual conversion\n", memberArgs) - } -} - -func isDirectlyAssignable(inType, outType *types.Type) bool { - // TODO: This should maybe check for actual assignability between the two - // types, rather than superficial traits that happen to indicate it is - // assignable in the ways we currently use this code. - return inType.IsAssignable() && (inType.IsPrimitive() || isSamePackage(inType, outType)) -} - -func isSamePackage(inType, outType *types.Type) bool { - return inType.Name.Package == outType.Name.Package -} diff --git a/hack/tools/third_party/conversion-gen/main.go b/hack/tools/third_party/conversion-gen/main.go deleted file mode 100644 index 15c29f3031..0000000000 --- a/hack/tools/third_party/conversion-gen/main.go +++ /dev/null @@ -1,138 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// conversion-gen is a tool for auto-generating functions that convert -// between internal and external types. A general conversion code -// generation task involves three sets of packages: (1) a set of -// packages containing internal types, (2) a single package containing -// the external types, and (3) a single destination package (i.e., -// where the generated conversion functions go, and where the -// developer-authored conversion functions are). 
The packages
-// containing the internal types play the role known as "peer
-// packages" in the general code-generation framework of Kubernetes.
-//
-// For each conversion task, `conversion-gen` will generate functions
-// that efficiently convert between same-name types in the two
-// (internal, external) packages. The generated functions include
-// ones named
-//
-//	autoConvert_<pkg1>_<type>_To_<pkg2>_<type>
-//
-// for each such pair of types --- both with (pkg1,pkg2) =
-// (internal,external) and (pkg1,pkg2) = (external,internal). The
-// generated conversion functions recurse on the structure of the data
-// types. For structs, source and destination fields are matched up
-// according to name; if a source field has no corresponding
-// destination or there is a fundamental mismatch in the type of the
-// field then the generated autoConvert_... function has just a
-// warning comment about that field. The generated conversion
-// functions use standard value assignment wherever possible. For
-// compound types, the generated conversion functions call the
-// `Convert...` functions for the subsidiary types.
-//
-// For each pair of types `conversion-gen` will also generate a
-// function named
-//
-//	Convert_<pkg1>_<type>_To_<pkg2>_<type>
-//
-// if both of two conditions are met: (1) the destination package does
-// not contain a function of that name in a non-generated file and (2)
-// the generation of the corresponding autoConvert_... function did
-// not run into trouble with a missing or fundamentally differently
-// typed field. A generated Convert_... function simply calls the
-// corresponding `autoConvert...` function. `conversion-gen` also
-// generates a function that updates a given `runtime.Scheme` by
-// registering all the Convert_... functions found and generated.
-// Thus developers can override the generated behavior for selected
-// type pairs by putting the desired Convert_... functions in
-// non-generated files. Further, developers are practically required
-// to override the generated behavior when there are missing or
-// fundamentally differently typed fields.
-//
-// `conversion-gen` will scan its `--input-dirs`, looking at the
-// package defined in each of those directories for comment tags that
-// define a conversion code generation task. A package requests
-// conversion code generation by including one or more comments in the
-// package's `doc.go` file (currently anywhere in that file is
-// acceptable, but the recommended location is above the `package`
-// statement), of the form:
-//
-//	// +k8s:conversion-gen=<import-path-of-internal-package>
-//
-// This introduces a conversion task, for which the destination
-// package is the one containing the file with the tag and the tag
-// identifies a package containing internal types. If there is also a
-// tag of the form
-//
-//	// +k8s:conversion-gen-external-types=<import-path-of-external-package>
-//
-// then it identifies the package containing the external types;
-// otherwise they are in the destination package.
-//
-// For each conversion code generation task, the full set of internal
-// packages (AKA peer packages) consists of the ones specified in the
-// `k8s:conversion-gen` tags PLUS any specified in the
-// `--base-peer-dirs` and `--extra-peer-dirs` flags on the command
-// line.
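The override rule described above is the escape hatch for fields the generator cannot handle. A hedged example of a developer-owned conversion in the destination package (the renamed field is hypothetical):

```go
// Hand-written, in a non-generated file of the destination package. Because a
// function with this exact name already exists, conversion-gen emits only the
// autoConvert_* helper and registers this wrapper instead.
func Convert_v1beta1_Widget_To_v1beta2_Widget(in *WidgetV1Beta1, out *WidgetV1Beta2, s conversion.Scope) error {
	if err := autoConvert_v1beta1_Widget_To_v1beta2_Widget(in, out, s); err != nil {
		return err
	}
	// The field was renamed between versions, so the auto function only left a
	// WARNING comment; carry the value across by hand.
	out.DisplayName = in.Title
	return nil
}
```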
-// -// When generating for a package, individual types or fields of structs may opt -// out of Conversion generation by specifying a comment on the of the form: -// -// // +k8s:conversion-gen=false -package main - -import ( - "flag" - - "github.com/spf13/pflag" - "k8s.io/klog/v2" - - generatorargs "k8s.io/code-generator/cmd/conversion-gen/args" - "k8s.io/gengo/v2" - "k8s.io/gengo/v2/generator" - "sigs.k8s.io/cluster-api-provider-aws/hack/tools/third_party/conversion-gen/generators" -) - -func main() { - klog.InitFlags(nil) - args := generatorargs.New() - - args.AddFlags(pflag.CommandLine) - flag.Set("logtostderr", "true") - pflag.CommandLine.AddGoFlagSet(flag.CommandLine) - pflag.Parse() - - if err := args.Validate(); err != nil { - klog.Fatalf("Error: %v", err) - } - - myTargets := func(context *generator.Context) []generator.Target { - return generators.GetTargets(context, args) - } - - // Run it. - if err := gengo.Execute( - generators.NameSystems(), - generators.DefaultNameSystem(), - myTargets, - gengo.StdBuildTag, - pflag.Args(), - ); err != nil { - klog.Fatalf("Error: %v", err) - } - klog.V(2).Info("Completed successfully.") -} - diff --git a/hack/verify-shellcheck.sh b/hack/verify-shellcheck.sh index f42202c113..7df30920e6 100755 --- a/hack/verify-shellcheck.sh +++ b/hack/verify-shellcheck.sh @@ -66,9 +66,9 @@ fi echo "Running shellcheck..." cd "${ROOT_PATH}" || exit -IGNORE_FILES=$(find . -name "*.sh" | grep "third_party\|tilt_modules") +IGNORE_FILES=$(find . -name "*.sh" | grep "tilt_modules") echo "Ignoring shellcheck on ${IGNORE_FILES}" -FILES=$(find . -name "*.sh" -not -path "./tilt_modules/*" -not -path "*third_party*") +FILES=$(find . -name "*.sh" -not -path "./tilt_modules/*") while read -r file; do "$SHELLCHECK" -x "$file" >> "${OUT}" 2>&1 done <<< "$FILES" diff --git a/main.go b/main.go index 8aac35b373..62a64cc53e 100644 --- a/main.go +++ b/main.go @@ -65,8 +65,8 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/record" "sigs.k8s.io/cluster-api-provider-aws/v2/version" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - expclusterv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" + controlplanev1 "sigs.k8s.io/cluster-api/api/controlplane/kubeadm/v1beta2" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/cluster-api/util/flags" ) @@ -80,7 +80,7 @@ func init() { _ = eksbootstrapv1beta1.AddToScheme(scheme) _ = cgscheme.AddToScheme(scheme) _ = clusterv1.AddToScheme(scheme) - _ = expclusterv1.AddToScheme(scheme) + _ = controlplanev1.AddToScheme(scheme) _ = ekscontrolplanev1.AddToScheme(scheme) _ = ekscontrolplanev1beta1.AddToScheme(scheme) _ = rosacontrolplanev1.AddToScheme(scheme) @@ -270,6 +270,17 @@ func main() { os.Exit(1) } + setupLog.Debug("enabling ROSA network controller") + if err = (&expcontrollers.ROSANetworkReconciler{ + Client: mgr.GetClient(), + WatchFilterValue: watchFilterValue, + Log: ctrl.Log.WithName("controllers").WithName("ROSANetwork"), + Scheme: mgr.GetScheme(), + }).SetupWithManager(ctx, mgr, controller.Options{MaxConcurrentReconciles: awsClusterConcurrency, RecoverPanic: ptr.To[bool](true)}); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "ROSANetwork") + os.Exit(1) + } + if err := (&rosacontrolplanev1.ROSAControlPlane{}).SetupWebhookWithManager(mgr); err != nil { setupLog.Error(err, "unable to create webhook", "webhook", "ROSAControlPlane") os.Exit(1) @@ -279,8 +290,27 @@ func main() { setupLog.Error(err, "unable to create webhook", "webhook", 
"ROSAMachinePool") os.Exit(1) } - } + if err := (&expinfrav1.ROSANetwork{}).SetupWebhookWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create webhook", "webhook", "ROSANetwork") + os.Exit(1) + } + + setupLog.Debug("enabling ROSA role config controller") + if err = (&expcontrollers.ROSARoleConfigReconciler{ + Client: mgr.GetClient(), + Recorder: mgr.GetEventRecorderFor("rosaroleconfig-controller"), + WatchFilterValue: watchFilterValue, + }).SetupWithManager(ctx, mgr, controller.Options{MaxConcurrentReconciles: awsClusterConcurrency, RecoverPanic: ptr.To[bool](true)}); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "ROSARoleConfig") + os.Exit(1) + } + + if err := (&expinfrav1.ROSARoleConfig{}).SetupWebhookWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create webhook", "webhook", "ROSARoleConfig") + os.Exit(1) + } + } // +kubebuilder:scaffold:builder if err := mgr.AddReadyzCheck("webhook", mgr.GetWebhookServer().StartedChecker()); err != nil { @@ -330,9 +360,19 @@ func setupReconcilersAndWebhooks(ctx context.Context, mgr ctrl.Manager, setupLog.Error(err, "unable to create controller", "controller", "AWSCluster") os.Exit(1) } + + setupLog.Info("enabling AWSMachineTemplate controller") + if err := (&controllers.AWSMachineTemplateReconciler{ + Client: mgr.GetClient(), + WatchFilterValue: watchFilterValue, + }).SetupWithManager(ctx, mgr, controller.Options{MaxConcurrentReconciles: awsClusterConcurrency, RecoverPanic: ptr.To[bool](true)}); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "AWSMachineTemplate") + os.Exit(1) + } } else { setupLog.Info("controller disabled", "controller", "AWSMachine", "controller-group", controllers.Unmanaged) setupLog.Info("controller disabled", "controller", "AWSCluster", "controller-group", controllers.Unmanaged) + setupLog.Info("controller disabled", "controller", "AWSMachineTemplate", "controller-group", controllers.Unmanaged) } if feature.Gates.Enabled(feature.MachinePool) { diff --git a/metadata.yaml b/metadata.yaml index 29f752fea3..cb4ec87a08 100644 --- a/metadata.yaml +++ b/metadata.yaml @@ -66,3 +66,9 @@ releaseSeries: - major: 2 minor: 9 contract: v1beta1 + - major: 2 + minor: 10 + contract: v1beta1 + - major: 2 + minor: 11 + contract: v1beta1 diff --git a/pkg/cloud/converters/eks.go b/pkg/cloud/converters/eks.go index fbb35c67c3..243b6d9cbd 100644 --- a/pkg/cloud/converters/eks.go +++ b/pkg/cloud/converters/eks.go @@ -189,11 +189,11 @@ func NodegroupUpdateconfigToSDK(updateConfig *expinfrav1.UpdateConfig) (*ekstype converted := &ekstypes.NodegroupUpdateConfig{} if updateConfig.MaxUnavailable != nil { - //nolint:gosec,G115 // Added golint exception as there is a kubebuilder validation configured + //nolint:G115 // Added golint exception as there is a kubebuilder validation configured converted.MaxUnavailable = aws.Int32(int32(*updateConfig.MaxUnavailable)) } if updateConfig.MaxUnavailablePercentage != nil { - //nolint:gosec,G115 // Added golint exception as there is a kubebuilder validation configured + //nolint:G115 // Added golint exception as there is a kubebuilder validation configured converted.MaxUnavailablePercentage = aws.Int32(int32(*updateConfig.MaxUnavailablePercentage)) } @@ -217,6 +217,20 @@ func NodegroupUpdateconfigFromSDK(ngUpdateConfig *ekstypes.NodegroupUpdateConfig return converted } +// NodeRepairConfigToSDK is used to convert a CAPA NodeRepairConfig to AWS SDK NodeRepairConfig. 
+func NodeRepairConfigToSDK(repairConfig *expinfrav1.NodeRepairConfig) *ekstypes.NodeRepairConfig { + if repairConfig == nil { + // Default to disabled if not specified to avoid behavior changes + return &ekstypes.NodeRepairConfig{ + Enabled: aws.Bool(false), + } + } + + return &ekstypes.NodeRepairConfig{ + Enabled: repairConfig.Enabled, + } +} + // AMITypeToSDK converts a CAPA ManagedMachineAMIType to AWS SDK AMIType. func AMITypeToSDK(amiType expinfrav1.ManagedMachineAMIType) ekstypes.AMITypes { switch amiType { @@ -278,3 +292,11 @@ func AddonConflictResolutionFromSDK(conflict ekstypes.ResolveConflicts) *string } return aws.String(string(ekscontrolplanev1.AddonResolutionOverwrite)) } + +// SupportTypeToSDK converts CAPA upgrade support policy types to SDK types. +func SupportTypeToSDK(input ekscontrolplanev1.UpgradePolicy) ekstypes.SupportType { + if input == ekscontrolplanev1.UpgradePolicyStandard { + return ekstypes.SupportTypeStandard + } + return ekstypes.SupportTypeExtended +} diff --git a/pkg/cloud/converters/eks_test.go b/pkg/cloud/converters/eks_test.go new file mode 100644 index 0000000000..5e87ba0f0e --- /dev/null +++ b/pkg/cloud/converters/eks_test.go @@ -0,0 +1,65 @@ +/* +Copyright 2025 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package converters + +import ( + "testing" + + "github.com/aws/aws-sdk-go-v2/aws" + ekstypes "github.com/aws/aws-sdk-go-v2/service/eks/types" + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + + expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2" +) + +func TestNodeRepairConfigToSDK(t *testing.T) { + tests := []struct { + name string + input *expinfrav1.NodeRepairConfig + expected *ekstypes.NodeRepairConfig + }{ + { + name: "nil input returns default disabled", + input: nil, + expected: &ekstypes.NodeRepairConfig{Enabled: aws.Bool(false)}, + }, + { + name: "enabled repair config", + input: &expinfrav1.NodeRepairConfig{ + Enabled: aws.Bool(true), + }, + expected: &ekstypes.NodeRepairConfig{Enabled: aws.Bool(true)}, + }, + { + name: "disabled repair config", + input: &expinfrav1.NodeRepairConfig{ + Enabled: aws.Bool(false), + }, + expected: &ekstypes.NodeRepairConfig{Enabled: aws.Bool(false)}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := NodeRepairConfigToSDK(tt.input) + if !cmp.Equal(result, tt.expected, cmpopts.IgnoreUnexported(ekstypes.NodeRepairConfig{})) { + t.Errorf("NodeRepairConfigToSDK() diff (-want +got):\n%s", cmp.Diff(tt.expected, result, cmpopts.IgnoreUnexported(ekstypes.NodeRepairConfig{}))) + } + }) + } +} diff --git a/pkg/cloud/endpoints/partitions.go b/pkg/cloud/endpoints/partitions.go index 65ff2f0d6a..66c42917c3 100644 --- a/pkg/cloud/endpoints/partitions.go +++ b/pkg/cloud/endpoints/partitions.go @@ -129,6 +129,13 @@ var partitions = []Partition { SupportsFIPS: nil, SupportsDualStack: nil, }, + "ap-southeast-6": RegionOverrides{ + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, 
"ap-southeast-7": RegionOverrides{ Name: nil, DnsSuffix: nil, @@ -314,32 +321,18 @@ var partitions = []Partition { }, }, Partition { - ID: "aws-us-gov", - RegionRegex: "^us\\-gov\\-\\w+\\-\\d+$", + ID: "aws-eusc", + RegionRegex: "^eusc\\-(de)\\-\\w+\\-\\d+$", DefaultConfig: PartitionConfig{ - Name: "aws-us-gov", - DnsSuffix: "amazonaws.com", - DualStackDnsSuffix: "api.aws", + Name: "aws-eusc", + DnsSuffix: "amazonaws.eu", + DualStackDnsSuffix: "api.amazonwebservices.eu", SupportsFIPS: true, SupportsDualStack: true, - ImplicitGlobalRegion: "us-gov-west-1", + ImplicitGlobalRegion: "eusc-de-east-1", }, Regions: map[string]RegionOverrides { - "aws-us-gov-global": RegionOverrides{ - Name: nil, - DnsSuffix: nil, - DualStackDnsSuffix: nil, - SupportsFIPS: nil, - SupportsDualStack: nil, - }, - "us-gov-east-1": RegionOverrides{ - Name: nil, - DnsSuffix: nil, - DualStackDnsSuffix: nil, - SupportsFIPS: nil, - SupportsDualStack: nil, - }, - "us-gov-west-1": RegionOverrides{ + "eusc-de-east-1": RegionOverrides{ Name: nil, DnsSuffix: nil, DualStackDnsSuffix: nil, @@ -354,9 +347,9 @@ var partitions = []Partition { DefaultConfig: PartitionConfig{ Name: "aws-iso", DnsSuffix: "c2s.ic.gov", - DualStackDnsSuffix: "c2s.ic.gov", + DualStackDnsSuffix: "api.aws.ic.gov", SupportsFIPS: true, - SupportsDualStack: false, + SupportsDualStack: true, ImplicitGlobalRegion: "us-iso-east-1", }, Regions: map[string]RegionOverrides { @@ -389,9 +382,9 @@ var partitions = []Partition { DefaultConfig: PartitionConfig{ Name: "aws-iso-b", DnsSuffix: "sc2s.sgov.gov", - DualStackDnsSuffix: "sc2s.sgov.gov", + DualStackDnsSuffix: "api.aws.scloud", SupportsFIPS: true, - SupportsDualStack: false, + SupportsDualStack: true, ImplicitGlobalRegion: "us-isob-east-1", }, Regions: map[string]RegionOverrides { @@ -417,9 +410,9 @@ var partitions = []Partition { DefaultConfig: PartitionConfig{ Name: "aws-iso-e", DnsSuffix: "cloud.adc-e.uk", - DualStackDnsSuffix: "cloud.adc-e.uk", + DualStackDnsSuffix: "api.cloud-aws.adc-e.uk", SupportsFIPS: true, - SupportsDualStack: false, + SupportsDualStack: true, ImplicitGlobalRegion: "eu-isoe-west-1", }, Regions: map[string]RegionOverrides { @@ -445,9 +438,9 @@ var partitions = []Partition { DefaultConfig: PartitionConfig{ Name: "aws-iso-f", DnsSuffix: "csp.hci.ic.gov", - DualStackDnsSuffix: "csp.hci.ic.gov", + DualStackDnsSuffix: "api.aws.hci.ic.gov", SupportsFIPS: true, - SupportsDualStack: false, + SupportsDualStack: true, ImplicitGlobalRegion: "us-isof-south-1", }, Regions: map[string]RegionOverrides { @@ -475,18 +468,32 @@ var partitions = []Partition { }, }, Partition { - ID: "aws-eusc", - RegionRegex: "^eusc\\-(de)\\-\\w+\\-\\d+$", + ID: "aws-us-gov", + RegionRegex: "^us\\-gov\\-\\w+\\-\\d+$", DefaultConfig: PartitionConfig{ - Name: "aws-eusc", - DnsSuffix: "amazonaws.eu", - DualStackDnsSuffix: "amazonaws.eu", + Name: "aws-us-gov", + DnsSuffix: "amazonaws.com", + DualStackDnsSuffix: "api.aws", SupportsFIPS: true, - SupportsDualStack: false, - ImplicitGlobalRegion: "eusc-de-east-1", + SupportsDualStack: true, + ImplicitGlobalRegion: "us-gov-west-1", }, Regions: map[string]RegionOverrides { - "eusc-de-east-1": RegionOverrides{ + "aws-us-gov-global": RegionOverrides{ + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, + "us-gov-east-1": RegionOverrides{ + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, + "us-gov-west-1": RegionOverrides{ Name: nil, DnsSuffix: nil, DualStackDnsSuffix: 
nil, diff --git a/pkg/cloud/interfaces.go b/pkg/cloud/interfaces.go index 82c671e2c1..b69800ea2f 100644 --- a/pkg/cloud/interfaces.go +++ b/pkg/cloud/interfaces.go @@ -27,8 +27,8 @@ import ( infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/throttle" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - "sigs.k8s.io/cluster-api/util/conditions" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" + v1beta1conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" ) // Session represents an AWS session. @@ -45,7 +45,7 @@ type ScopeUsage interface { // ClusterObject represents a AWS cluster object. type ClusterObject interface { - conditions.Setter + v1beta1conditions.Setter } // ClusterScoper is the interface for a cluster scope. @@ -70,7 +70,7 @@ type ClusterScoper interface { InfraCluster() ClusterObject // Cluster returns the cluster object. - ClusterObj() ClusterObject + ClusterObj() *clusterv1.Cluster // UnstructuredControlPlane returns the unstructured control plane object. UnstructuredControlPlane() (*unstructured.Unstructured, error) @@ -84,7 +84,7 @@ type ClusterScoper interface { // AdditionalTags returns any tags that you would like to attach to AWS resources. The returned value will never be nil. AdditionalTags() infrav1.Tags // SetFailureDomain sets the infrastructure provider failure domain key to the spec given as input. - SetFailureDomain(id string, spec clusterv1.FailureDomainSpec) + SetFailureDomain(id string, spec clusterv1.FailureDomain) // PatchObject persists the cluster configuration and status. PatchObject() error // Close closes the current scope persisting the cluster configuration and status. @@ -104,4 +104,6 @@ type SessionMetadata interface { InfraCluster() ClusterObject // IdentityRef returns the AWS infrastructure cluster identityRef. IdentityRef() *infrav1.AWSIdentityReference + // ControllerName returns the controller name + ControllerName() string } diff --git a/pkg/cloud/scope/cluster.go b/pkg/cloud/scope/cluster.go index 730b977578..c6a064d43e 100644 --- a/pkg/cloud/scope/cluster.go +++ b/pkg/cloud/scope/cluster.go @@ -25,6 +25,7 @@ import ( "github.com/pkg/errors" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/klog/v2" + "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" @@ -32,9 +33,10 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/endpoints" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/throttle" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - "sigs.k8s.io/cluster-api/util/conditions" - "sigs.k8s.io/cluster-api/util/patch" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" + v1beta1conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" + v1beta1patch "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/patch" ) // ClusterScopeParams defines the input parameters used to create a new Scope. 
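The re-aliased imports above set the pattern for every scope file in this change: core API types now come from v1beta2 as clusterv1, while condition and patch handling stays on CAPI's deprecated v1beta1 helpers. A minimal sketch of the resulting idiom (variable names illustrative):

	helper, err := v1beta1patch.NewHelper(awsCluster, k8sClient)
	if err != nil {
		return errors.Wrap(err, "failed to init patch helper")
	}
	v1beta1conditions.MarkTrue(awsCluster, infrav1.VpcReadyCondition)
	return helper.Patch(ctx, awsCluster)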
@@ -79,7 +81,7 @@ func NewClusterScope(params ClusterScopeParams) (*ClusterScope, error) { return nil, errors.Errorf("failed to create aws V2 session: %v", err) } - helper, err := patch.NewHelper(params.AWSCluster, params.Client) + helper, err := v1beta1patch.NewHelper(params.AWSCluster, params.Client) if err != nil { return nil, errors.Wrap(err, "failed to init patch helper") } @@ -95,7 +97,7 @@ func NewClusterScope(params ClusterScopeParams) (*ClusterScope, error) { type ClusterScope struct { logger.Logger client client.Client - patchHelper *patch.Helper + patchHelper *v1beta1patch.Helper Cluster *clusterv1.Cluster AWSCluster *infrav1.AWSCluster @@ -207,6 +209,7 @@ func (s *ClusterScope) ControlPlaneLoadBalancers() []*infrav1.AWSLoadBalancerSpe } // ControlPlaneLoadBalancerScheme returns the Classic ELB scheme (public or internal facing). +// // Deprecated: This method is going to be removed in a future release. Use LoadBalancer.Scheme. func (s *ClusterScope) ControlPlaneLoadBalancerScheme() infrav1.ELBScheme { if s.ControlPlaneLoadBalancer() != nil && s.ControlPlaneLoadBalancer().Scheme != nil { @@ -225,7 +228,10 @@ func (s *ClusterScope) ControlPlaneLoadBalancerName() *string { // ControlPlaneEndpoint returns the cluster control plane endpoint. func (s *ClusterScope) ControlPlaneEndpoint() clusterv1.APIEndpoint { - return s.AWSCluster.Spec.ControlPlaneEndpoint + return clusterv1.APIEndpoint{ + Host: s.AWSCluster.Spec.ControlPlaneEndpoint.Host, + Port: s.AWSCluster.Spec.ControlPlaneEndpoint.Port, + } } // Bucket returns the cluster bucket configuration. @@ -250,7 +256,7 @@ func (s *ClusterScope) ListOptionsLabelSelector() client.ListOption { func (s *ClusterScope) PatchObject() error { // Always update the readyCondition by summarizing the state of other conditions. // A step counter is added to represent progress during the provisioning process (instead we are hiding during the deletion process). - applicableConditions := []clusterv1.ConditionType{ + applicableConditions := []clusterv1beta1.ConditionType{ infrav1.VpcReadyCondition, infrav1.SubnetsReadyCondition, infrav1.ClusterSecurityGroupsReadyCondition, @@ -273,17 +279,17 @@ func (s *ClusterScope) PatchObject() error { } } - conditions.SetSummary(s.AWSCluster, - conditions.WithConditions(applicableConditions...), - conditions.WithStepCounterIf(s.AWSCluster.ObjectMeta.DeletionTimestamp.IsZero()), - conditions.WithStepCounter(), + v1beta1conditions.SetSummary(s.AWSCluster, + v1beta1conditions.WithConditions(applicableConditions...), + v1beta1conditions.WithStepCounterIf(s.AWSCluster.ObjectMeta.DeletionTimestamp.IsZero()), + v1beta1conditions.WithStepCounter(), ) return s.patchHelper.Patch( context.TODO(), s.AWSCluster, - patch.WithOwnedConditions{Conditions: []clusterv1.ConditionType{ - clusterv1.ReadyCondition, + v1beta1patch.WithOwnedConditions{Conditions: []clusterv1beta1.ConditionType{ + clusterv1beta1.ReadyCondition, infrav1.VpcReadyCondition, infrav1.SubnetsReadyCondition, infrav1.InternetGatewayReadyCondition, @@ -315,18 +321,21 @@ func (s *ClusterScope) AdditionalTags() infrav1.Tags { // APIServerPort returns the APIServerPort to use when creating the load balancer. 
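// In core v1beta2, ClusterNetwork and APIServerPort are plain values rather than pointers,
// so the old nil-guards collapse into the zero check below; the contract, sketched with
// illustrative values:
//
//	Spec.ClusterNetwork.APIServerPort = 6443 // -> 6443
//	Spec.ClusterNetwork.APIServerPort = 0    // unset -> infrav1.DefaultAPIServerPort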
func (s *ClusterScope) APIServerPort() int32 { - if s.Cluster.Spec.ClusterNetwork != nil && s.Cluster.Spec.ClusterNetwork.APIServerPort != nil { - return *s.Cluster.Spec.ClusterNetwork.APIServerPort + if s.Cluster.Spec.ClusterNetwork.APIServerPort != 0 { + return s.Cluster.Spec.ClusterNetwork.APIServerPort } return infrav1.DefaultAPIServerPort } // SetFailureDomain sets the infrastructure provider failure domain key to the spec given as input. -func (s *ClusterScope) SetFailureDomain(id string, spec clusterv1.FailureDomainSpec) { +func (s *ClusterScope) SetFailureDomain(id string, spec clusterv1.FailureDomain) { if s.AWSCluster.Status.FailureDomains == nil { - s.AWSCluster.Status.FailureDomains = make(clusterv1.FailureDomains) + s.AWSCluster.Status.FailureDomains = make(clusterv1beta1.FailureDomains) + } + s.AWSCluster.Status.FailureDomains[id] = clusterv1beta1.FailureDomainSpec{ + Attributes: spec.Attributes, + ControlPlane: ptr.Deref(spec.ControlPlane, false), } - s.AWSCluster.Status.FailureDomains[id] = spec } // SetNatGatewaysIPs sets the Nat Gateways Public IPs. @@ -345,7 +354,7 @@ func (s *ClusterScope) InfraCluster() cloud.ClusterObject { } // ClusterObj returns the cluster object. -func (s *ClusterScope) ClusterObj() cloud.ClusterObject { +func (s *ClusterScope) ClusterObj() *clusterv1.Cluster { return s.Cluster } diff --git a/pkg/cloud/scope/elb.go b/pkg/cloud/scope/elb.go index 3d588f665b..0736cd3711 100644 --- a/pkg/cloud/scope/elb.go +++ b/pkg/cloud/scope/elb.go @@ -19,7 +19,7 @@ package scope import ( infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" ) // ELBScope is a scope for use with the ELB reconciling service. @@ -39,10 +39,12 @@ type ELBScope interface { VPC() *infrav1.VPCSpec // ControlPlaneLoadBalancer returns the AWSLoadBalancerSpec + // // Deprecated: Use ControlPlaneLoadBalancers() ControlPlaneLoadBalancer() *infrav1.AWSLoadBalancerSpec // ControlPlaneLoadBalancerScheme returns the Classic ELB scheme (public or internal facing) + // // Deprecated: This method is going to be removed in a future release. Use LoadBalancer.Scheme. ControlPlaneLoadBalancerScheme() infrav1.ELBScheme diff --git a/pkg/cloud/scope/fargate.go b/pkg/cloud/scope/fargate.go index 6e0fe0e1ef..44ec604e27 100644 --- a/pkg/cloud/scope/fargate.go +++ b/pkg/cloud/scope/fargate.go @@ -31,9 +31,10 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/endpoints" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/throttle" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - "sigs.k8s.io/cluster-api/util/conditions" - "sigs.k8s.io/cluster-api/util/patch" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" + v1beta1conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" + v1beta1patch "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/patch" ) // FargateProfileScopeParams defines the input parameters used to create a new Scope. 
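The SetFailureDomain rewrite in cluster.go above bridges the two API generations: the v1beta2 input carries an optional *bool ControlPlane, while the status map still stores v1beta1 specs. A minimal conversion sketch (assuming k8s.io/utils/ptr; values illustrative):

	fd := clusterv1.FailureDomain{
		Attributes:   map[string]string{"zone": "us-east-1a"},
		ControlPlane: ptr.To(true),
	}
	spec := clusterv1beta1.FailureDomainSpec{
		Attributes:   fd.Attributes,
		ControlPlane: ptr.Deref(fd.ControlPlane, false), // nil means "not a control plane domain"
	}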
@@ -73,7 +74,7 @@ func NewFargateProfileScope(params FargateProfileScopeParams) (*FargateProfileSc return nil, errors.Errorf("failed to create aws v2 session: %v", err) } - helper, err := patch.NewHelper(params.FargateProfile, params.Client) + helper, err := v1beta1patch.NewHelper(params.FargateProfile, params.Client) if err != nil { return nil, errors.Wrap(err, "failed to init patch helper") } @@ -96,7 +97,7 @@ func NewFargateProfileScope(params FargateProfileScopeParams) (*FargateProfileSc type FargateProfileScope struct { logger.Logger Client client.Client - patchHelper *patch.Helper + patchHelper *v1beta1patch.Helper Cluster *clusterv1.Cluster ControlPlane *ekscontrolplanev1.AWSManagedControlPlane @@ -168,11 +169,11 @@ func (s *FargateProfileScope) Partition() string { // IAMReadyFalse marks the ready condition false using warning if error isn't // empty. func (s *FargateProfileScope) IAMReadyFalse(reason string, err string) error { - severity := clusterv1.ConditionSeverityWarning + severity := clusterv1beta1.ConditionSeverityWarning if err == "" { - severity = clusterv1.ConditionSeverityInfo + severity = clusterv1beta1.ConditionSeverityInfo } - conditions.MarkFalse( + v1beta1conditions.MarkFalse( s.FargateProfile, expinfrav1.IAMFargateRolesReadyCondition, reason, @@ -191,7 +192,7 @@ func (s *FargateProfileScope) PatchObject() error { return s.patchHelper.Patch( context.TODO(), s.FargateProfile, - patch.WithOwnedConditions{Conditions: []clusterv1.ConditionType{ + v1beta1patch.WithOwnedConditions{Conditions: []clusterv1beta1.ConditionType{ expinfrav1.EKSFargateProfileReadyCondition, expinfrav1.EKSFargateCreatingCondition, expinfrav1.EKSFargateDeletingCondition, @@ -210,7 +211,7 @@ func (s *FargateProfileScope) InfraCluster() cloud.ClusterObject { } // ClusterObj returns the cluster object. -func (s *FargateProfileScope) ClusterObj() cloud.ClusterObject { +func (s *FargateProfileScope) ClusterObj() *clusterv1.Cluster { return s.Cluster } diff --git a/pkg/cloud/scope/launchtemplate.go b/pkg/cloud/scope/launchtemplate.go index 34e84e7ff7..f67a082783 100644 --- a/pkg/cloud/scope/launchtemplate.go +++ b/pkg/cloud/scope/launchtemplate.go @@ -24,13 +24,13 @@ import ( infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger" - expclusterv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" - "sigs.k8s.io/cluster-api/util/conditions" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" + v1beta1conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" ) // LaunchTemplateScope defines a scope defined around a launch template. 
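// Scope implementations below (e.g. MachinePoolScope and ManagedMachinePoolScope) now hand
// back the core v1beta2 MachinePool. The usual compile-time conformance idiom, shown as an
// illustrative sketch rather than code from this change:
//
//	var _ LaunchTemplateScope = (*MachinePoolScope)(nil)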
type LaunchTemplateScope interface { - GetMachinePool() *expclusterv1.MachinePool + GetMachinePool() *clusterv1.MachinePool GetLaunchTemplate() *expinfrav1.AWSLaunchTemplate LaunchTemplateName() string GetLaunchTemplateIDStatus() string @@ -43,7 +43,7 @@ type LaunchTemplateScope interface { AdditionalTags() infrav1.Tags GetObjectMeta() *metav1.ObjectMeta - GetSetter() conditions.Setter + GetSetter() v1beta1conditions.Setter PatchObject() error GetEC2Scope() EC2Scope diff --git a/pkg/cloud/scope/machine.go b/pkg/cloud/scope/machine.go index 243bd40242..c0a8d8122a 100644 --- a/pkg/cloud/scope/machine.go +++ b/pkg/cloud/scope/machine.go @@ -31,11 +31,12 @@ import ( ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/annotations" - "sigs.k8s.io/cluster-api/util/conditions" - "sigs.k8s.io/cluster-api/util/patch" + v1beta1conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" + v1beta1patch "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/patch" ) // MachineScopeParams defines the input parameters used to create a new MachineScope. @@ -72,7 +73,7 @@ func NewMachineScope(params MachineScopeParams) (*MachineScope, error) { params.Logger = logger.NewLogger(log) } - helper, err := patch.NewHelper(params.AWSMachine, params.Client) + helper, err := v1beta1patch.NewHelper(params.AWSMachine, params.Client) if err != nil { return nil, errors.Wrap(err, "failed to init patch helper") } @@ -91,7 +92,7 @@ func NewMachineScope(params MachineScopeParams) (*MachineScope, error) { type MachineScope struct { logger.Logger client client.Client - patchHelper *patch.Helper + patchHelper *v1beta1patch.Helper Cluster *clusterv1.Cluster Machine *clusterv1.Machine @@ -258,7 +259,7 @@ func (m *MachineScope) SetSecretCount(i int32) { } // SetAddresses sets the AWSMachine address status. -func (m *MachineScope) SetAddresses(addrs []clusterv1.MachineAddress) { +func (m *MachineScope) SetAddresses(addrs []clusterv1beta1.MachineAddress) { m.AWSMachine.Status.Addresses = addrs } @@ -302,7 +303,7 @@ func (m *MachineScope) GetRawBootstrapDataWithFormat() ([]byte, string, error) { func (m *MachineScope) PatchObject() error { // Always update the readyCondition by summarizing the state of other conditions. // A step counter is added to represent progress during the provisioning process (instead we are hiding during the deletion process). 
- applicableConditions := []clusterv1.ConditionType{ + applicableConditions := []clusterv1beta1.ConditionType{ infrav1.InstanceReadyCondition, infrav1.SecurityGroupsReadyCondition, } @@ -311,17 +312,17 @@ func (m *MachineScope) PatchObject() error { applicableConditions = append(applicableConditions, infrav1.ELBAttachedCondition) } - conditions.SetSummary(m.AWSMachine, - conditions.WithConditions(applicableConditions...), - conditions.WithStepCounterIf(m.AWSMachine.ObjectMeta.DeletionTimestamp.IsZero()), - conditions.WithStepCounter(), + v1beta1conditions.SetSummary(m.AWSMachine, + v1beta1conditions.WithConditions(applicableConditions...), + v1beta1conditions.WithStepCounterIf(m.AWSMachine.ObjectMeta.DeletionTimestamp.IsZero()), + v1beta1conditions.WithStepCounter(), ) return m.patchHelper.Patch( context.TODO(), m.AWSMachine, - patch.WithOwnedConditions{Conditions: []clusterv1.ConditionType{ - clusterv1.ReadyCondition, + v1beta1patch.WithOwnedConditions{Conditions: []clusterv1beta1.ConditionType{ + clusterv1beta1.ReadyCondition, infrav1.InstanceReadyCondition, infrav1.SecurityGroupsReadyCondition, infrav1.ELBAttachedCondition, diff --git a/pkg/cloud/scope/machine_test.go b/pkg/cloud/scope/machine_test.go index f34790d061..9e219fedbc 100644 --- a/pkg/cloud/scope/machine_test.go +++ b/pkg/cloud/scope/machine_test.go @@ -28,7 +28,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client/fake" infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" ) func setupScheme() (*runtime.Scheme, error) { diff --git a/pkg/cloud/scope/machinepool.go b/pkg/cloud/scope/machinepool.go index f9e7fd1225..c817e2971f 100644 --- a/pkg/cloud/scope/machinepool.go +++ b/pkg/cloud/scope/machinepool.go @@ -34,11 +34,12 @@ import ( ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2" expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/cluster-api/controllers/remote" - expclusterv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" "sigs.k8s.io/cluster-api/util" - "sigs.k8s.io/cluster-api/util/conditions" + v1beta1conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" + v1beta1patch "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/patch" "sigs.k8s.io/cluster-api/util/patch" ) @@ -46,11 +47,11 @@ import ( type MachinePoolScope struct { logger.Logger client.Client - patchHelper *patch.Helper + patchHelper *v1beta1patch.Helper capiMachinePoolPatchHelper *patch.Helper Cluster *clusterv1.Cluster - MachinePool *expclusterv1.MachinePool + MachinePool *clusterv1.MachinePool InfraCluster EC2Scope AWSMachinePool *expinfrav1.AWSMachinePool } @@ -61,7 +62,7 @@ type MachinePoolScopeParams struct { Logger *logger.Logger Cluster *clusterv1.Cluster - MachinePool *expclusterv1.MachinePool + MachinePool *clusterv1.MachinePool InfraCluster EC2Scope AWSMachinePool *expinfrav1.AWSMachinePool } @@ -98,7 +99,7 @@ func NewMachinePoolScope(params MachinePoolScopeParams) (*MachinePoolScope, erro params.Logger = logger.NewLogger(log) } - ampHelper, err := patch.NewHelper(params.AWSMachinePool, params.Client) + ampHelper, err := v1beta1patch.NewHelper(params.AWSMachinePool, params.Client) if err != nil { return nil, errors.Wrap(err, "failed to init 
AWSMachinePool patch helper") } @@ -175,7 +176,7 @@ func (m *MachinePoolScope) PatchObject() error { return m.patchHelper.Patch( context.TODO(), m.AWSMachinePool, - patch.WithOwnedConditions{Conditions: []clusterv1.ConditionType{ + v1beta1patch.WithOwnedConditions{Conditions: []clusterv1beta1.ConditionType{ expinfrav1.ASGReadyCondition, expinfrav1.LaunchTemplateReadyCondition, }}) @@ -238,7 +239,7 @@ func (m *MachinePoolScope) GetObjectMeta() *metav1.ObjectMeta { } // GetSetter returns the AWSMachinePool object setter. -func (m *MachinePoolScope) GetSetter() conditions.Setter { +func (m *MachinePoolScope) GetSetter() v1beta1conditions.Setter { return m.AWSMachinePool } @@ -380,7 +381,7 @@ func (m *MachinePoolScope) GetLaunchTemplate() *expinfrav1.AWSLaunchTemplate { } // GetMachinePool returns the machine pool object. -func (m *MachinePoolScope) GetMachinePool() *expclusterv1.MachinePool { +func (m *MachinePoolScope) GetMachinePool() *clusterv1.MachinePool { return m.MachinePool } diff --git a/pkg/cloud/scope/managedcontrolplane.go b/pkg/cloud/scope/managedcontrolplane.go index be0bc76864..1571dbb4e2 100644 --- a/pkg/cloud/scope/managedcontrolplane.go +++ b/pkg/cloud/scope/managedcontrolplane.go @@ -30,6 +30,7 @@ import ( "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "k8s.io/klog/v2" + "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" @@ -38,14 +39,13 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/endpoints" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/throttle" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/cluster-api/controllers/remote" - "sigs.k8s.io/cluster-api/util/patch" + v1beta1patch "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/patch" ) -var ( - scheme = runtime.NewScheme() -) +var scheme = runtime.NewScheme() func init() { _ = amazoncni.AddToScheme(scheme) @@ -104,7 +104,7 @@ func NewManagedControlPlaneScope(params ManagedControlPlaneScopeParams) (*Manage managedScope.session = *session managedScope.serviceLimiters = serviceLimiters - helper, err := patch.NewHelper(params.ControlPlane, params.Client) + helper, err := v1beta1patch.NewHelper(params.ControlPlane, params.Client) if err != nil { return nil, errors.Wrap(err, "failed to init patch helper") } @@ -117,7 +117,7 @@ func NewManagedControlPlaneScope(params ManagedControlPlaneScopeParams) (*Manage type ManagedControlPlaneScope struct { logger.Logger Client client.Client - patchHelper *patch.Helper + patchHelper *v1beta1patch.Helper Cluster *clusterv1.Cluster ControlPlane *ekscontrolplanev1.AWSManagedControlPlane @@ -268,7 +268,7 @@ func (s *ManagedControlPlaneScope) PatchObject() error { return s.patchHelper.Patch( context.TODO(), s.ControlPlane, - patch.WithOwnedConditions{Conditions: []clusterv1.ConditionType{ + v1beta1patch.WithOwnedConditions{Conditions: []clusterv1beta1.ConditionType{ infrav1.VpcReadyCondition, infrav1.SubnetsReadyCondition, infrav1.ClusterSecurityGroupsReadyCondition, @@ -305,11 +305,14 @@ func (s *ManagedControlPlaneScope) APIServerPort() int32 { } // SetFailureDomain sets the infrastructure provider failure domain key to the spec given as input. 
-func (s *ManagedControlPlaneScope) SetFailureDomain(id string, spec clusterv1.FailureDomainSpec) { +func (s *ManagedControlPlaneScope) SetFailureDomain(id string, spec clusterv1.FailureDomain) { if s.ControlPlane.Status.FailureDomains == nil { - s.ControlPlane.Status.FailureDomains = make(clusterv1.FailureDomains) + s.ControlPlane.Status.FailureDomains = make(clusterv1beta1.FailureDomains) + } + s.ControlPlane.Status.FailureDomains[id] = clusterv1beta1.FailureDomainSpec{ + ControlPlane: ptr.Deref(spec.ControlPlane, false), + Attributes: spec.Attributes, } - s.ControlPlane.Status.FailureDomains[id] = spec } // InfraCluster returns the AWS infrastructure cluster or control plane object. @@ -318,7 +321,7 @@ func (s *ManagedControlPlaneScope) InfraCluster() cloud.ClusterObject { } // ClusterObj returns the cluster object. -func (s *ManagedControlPlaneScope) ClusterObj() cloud.ClusterObject { +func (s *ManagedControlPlaneScope) ClusterObj() *clusterv1.Cluster { return s.Cluster } @@ -447,12 +450,8 @@ func (s *ManagedControlPlaneScope) OIDCIdentityProviderConfig() *ekscontrolplane // ServiceCidrs returns the CIDR blocks used for services. func (s *ManagedControlPlaneScope) ServiceCidrs() *clusterv1.NetworkRanges { - if s.Cluster.Spec.ClusterNetwork != nil { - if s.Cluster.Spec.ClusterNetwork.Services != nil { - if len(s.Cluster.Spec.ClusterNetwork.Services.CIDRBlocks) > 0 { - return s.Cluster.Spec.ClusterNetwork.Services - } - } + if len(s.Cluster.Spec.ClusterNetwork.Services.CIDRBlocks) > 0 { + return &s.Cluster.Spec.ClusterNetwork.Services } return nil diff --git a/pkg/cloud/scope/managednodegroup.go b/pkg/cloud/scope/managednodegroup.go index 7ef4663a24..d0b2116706 100644 --- a/pkg/cloud/scope/managednodegroup.go +++ b/pkg/cloud/scope/managednodegroup.go @@ -37,9 +37,10 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/endpoints" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/throttle" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - expclusterv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" - "sigs.k8s.io/cluster-api/util/conditions" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" + v1beta1conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" + v1beta1patch "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/patch" "sigs.k8s.io/cluster-api/util/patch" ) @@ -50,7 +51,7 @@ type ManagedMachinePoolScopeParams struct { Cluster *clusterv1.Cluster ControlPlane *ekscontrolplanev1.AWSManagedControlPlane ManagedMachinePool *expinfrav1.AWSManagedMachinePool - MachinePool *expclusterv1.MachinePool + MachinePool *clusterv1.MachinePool ControllerName string Session awsv2.Config MaxWaitActiveUpdateDelete time.Duration @@ -92,7 +93,7 @@ func NewManagedMachinePoolScope(params ManagedMachinePoolScopeParams) (*ManagedM return nil, errors.Errorf("failed to create aws V2 session: %v", err) } - ammpHelper, err := patch.NewHelper(params.ManagedMachinePool, params.Client) + ammpHelper, err := v1beta1patch.NewHelper(params.ManagedMachinePool, params.Client) if err != nil { return nil, errors.Wrap(err, "failed to init AWSManagedMachinePool patch helper") } @@ -125,13 +126,13 @@ func NewManagedMachinePoolScope(params ManagedMachinePoolScopeParams) (*ManagedM type ManagedMachinePoolScope struct { logger.Logger client.Client - patchHelper *patch.Helper + patchHelper *v1beta1patch.Helper capiMachinePoolPatchHelper *patch.Helper Cluster *clusterv1.Cluster ControlPlane 
*ekscontrolplanev1.AWSManagedControlPlane ManagedMachinePool *expinfrav1.AWSManagedMachinePool - MachinePool *expclusterv1.MachinePool + MachinePool *clusterv1.MachinePool EC2Scope EC2Scope MaxWaitActiveUpdateDelete time.Duration @@ -201,7 +202,10 @@ func (s *ManagedMachinePoolScope) RoleName() string { // Version returns the nodegroup Kubernetes version. func (s *ManagedMachinePoolScope) Version() *string { - return s.MachinePool.Spec.Template.Spec.Version + if s.MachinePool.Spec.Template.Spec.Version == "" { + return nil + } + return &s.MachinePool.Spec.Template.Spec.Version } // ControlPlaneSubnets returns the control plane subnets. @@ -228,11 +232,11 @@ func (s *ManagedMachinePoolScope) SubnetIDs() ([]string, error) { // NodegroupReadyFalse marks the ready condition false using warning if error isn't // empty. func (s *ManagedMachinePoolScope) NodegroupReadyFalse(reason string, err string) error { - severity := clusterv1.ConditionSeverityWarning + severity := clusterv1beta1.ConditionSeverityWarning if err == "" { - severity = clusterv1.ConditionSeverityInfo + severity = clusterv1beta1.ConditionSeverityInfo } - conditions.MarkFalse( + v1beta1conditions.MarkFalse( s.ManagedMachinePool, expinfrav1.EKSNodegroupReadyCondition, reason, @@ -249,11 +253,11 @@ func (s *ManagedMachinePoolScope) NodegroupReadyFalse(reason string, err string) // IAMReadyFalse marks the ready condition false using warning if error isn't // empty. func (s *ManagedMachinePoolScope) IAMReadyFalse(reason string, err string) error { - severity := clusterv1.ConditionSeverityWarning + severity := clusterv1beta1.ConditionSeverityWarning if err == "" { - severity = clusterv1.ConditionSeverityInfo + severity = clusterv1beta1.ConditionSeverityInfo } - conditions.MarkFalse( + v1beta1conditions.MarkFalse( s.ManagedMachinePool, expinfrav1.IAMNodegroupRolesReadyCondition, reason, @@ -272,7 +276,7 @@ func (s *ManagedMachinePoolScope) PatchObject() error { return s.patchHelper.Patch( context.TODO(), s.ManagedMachinePool, - patch.WithOwnedConditions{Conditions: []clusterv1.ConditionType{ + v1beta1patch.WithOwnedConditions{Conditions: []clusterv1beta1.ConditionType{ expinfrav1.EKSNodegroupReadyCondition, expinfrav1.IAMNodegroupRolesReadyCondition, }}) @@ -297,7 +301,7 @@ func (s *ManagedMachinePoolScope) InfraCluster() cloud.ClusterObject { } // ClusterObj returns the cluster object. -func (s *ManagedMachinePoolScope) ClusterObj() cloud.ClusterObject { +func (s *ManagedMachinePoolScope) ClusterObj() *clusterv1.Cluster { return s.Cluster } @@ -364,7 +368,7 @@ func (s *ManagedMachinePoolScope) GetObjectMeta() *metav1.ObjectMeta { } // GetSetter returns the condition setter. -func (s *ManagedMachinePoolScope) GetSetter() conditions.Setter { +func (s *ManagedMachinePoolScope) GetSetter() v1beta1conditions.Setter { return s.ManagedMachinePool } @@ -410,7 +414,7 @@ func (s *ManagedMachinePoolScope) GetLaunchTemplate() *expinfrav1.AWSLaunchTempl } // GetMachinePool returns the machine pool. 
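// The returned object is now the core v1beta2 MachinePool, whose version is a plain string
// field; an unset version is the empty string rather than a nil pointer (illustrative
// caller-side check):
//
//	if mp := s.GetMachinePool(); mp.Spec.Template.Spec.Version != "" {
//		// version explicitly set
//	}
//
// which is the same empty-string convention Version() above normalizes back into a *string.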
-func (s *ManagedMachinePoolScope) GetMachinePool() *expclusterv1.MachinePool { +func (s *ManagedMachinePoolScope) GetMachinePool() *clusterv1.MachinePool { return s.MachinePool } diff --git a/pkg/cloud/scope/rosacontrolplane.go b/pkg/cloud/scope/rosacontrolplane.go index 4aac52bd01..373cd02f14 100644 --- a/pkg/cloud/scope/rosacontrolplane.go +++ b/pkg/cloud/scope/rosacontrolplane.go @@ -35,8 +35,9 @@ import ( stsservice "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/sts" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/throttle" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - "sigs.k8s.io/cluster-api/util/patch" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" + v1beta1patch "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/patch" ) // ROSAControlPlaneScopeParams defines the input parameters used to create a new ROSAControlPlaneScope. @@ -76,7 +77,7 @@ func NewROSAControlPlaneScope(params ROSAControlPlaneScopeParams) (*ROSAControlP return nil, errors.Errorf("failed to create aws V2 session: %v", err) } - helper, err := patch.NewHelper(params.ControlPlane, params.Client) + helper, err := v1beta1patch.NewHelper(params.ControlPlane, params.Client) if err != nil { return nil, errors.Wrap(err, "failed to init patch helper") } @@ -99,7 +100,7 @@ func NewROSAControlPlaneScope(params ROSAControlPlaneScopeParams) (*ROSAControlP type ROSAControlPlaneScope struct { logger.Logger Client client.Client - patchHelper *patch.Helper + patchHelper *v1beta1patch.Helper Cluster *clusterv1.Cluster ControlPlane *rosacontrolplanev1.ROSAControlPlane @@ -138,9 +139,11 @@ func (s *ROSAControlPlaneScope) ControllerName() string { return s.controllerName } -var _ cloud.ScopeUsage = (*ROSAControlPlaneScope)(nil) -var _ cloud.Session = (*ROSAControlPlaneScope)(nil) -var _ cloud.SessionMetadata = (*ROSAControlPlaneScope)(nil) +var ( + _ cloud.ScopeUsage = (*ROSAControlPlaneScope)(nil) + _ cloud.Session = (*ROSAControlPlaneScope)(nil) + _ cloud.SessionMetadata = (*ROSAControlPlaneScope)(nil) +) // Name returns the CAPI cluster name. func (s *ROSAControlPlaneScope) Name() string { @@ -162,6 +165,11 @@ func (s *ROSAControlPlaneScope) Namespace() string { return s.Cluster.Namespace } +// GetClient return Client of this scope. +func (s *ROSAControlPlaneScope) GetClient() client.Client { + return s.Client +} + // CredentialsSecret returns the CredentialsSecret object. 
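// Judging by the analogous RosaRoleConfigScope implementation below, the returned object is
// a name/namespace stub; a hedged fetch pattern using the scope's newly added GetClient
// (illustrative, not code from this change):
//
//	if secret := s.CredentialsSecret(); secret != nil {
//		if err := s.GetClient().Get(ctx, client.ObjectKeyFromObject(secret), secret); err != nil {
//			return err
//		}
//	}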
func (s *ROSAControlPlaneScope) CredentialsSecret() *corev1.Secret { secretRef := s.ControlPlane.Spec.CredentialsSecretRef @@ -207,7 +215,7 @@ func (s *ROSAControlPlaneScope) PatchObject() error { return s.patchHelper.Patch( context.TODO(), s.ControlPlane, - patch.WithOwnedConditions{Conditions: []clusterv1.ConditionType{ + v1beta1patch.WithOwnedConditions{Conditions: []clusterv1beta1.ConditionType{ rosacontrolplanev1.ROSAControlPlaneReadyCondition, rosacontrolplanev1.ROSAControlPlaneValidCondition, rosacontrolplanev1.ROSAControlPlaneUpgradingCondition, diff --git a/pkg/cloud/scope/rosamachinepool.go b/pkg/cloud/scope/rosamachinepool.go index 5c53635b5b..aa93ad8e56 100644 --- a/pkg/cloud/scope/rosamachinepool.go +++ b/pkg/cloud/scope/rosamachinepool.go @@ -30,9 +30,10 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/throttle" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - expclusterv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" - "sigs.k8s.io/cluster-api/util/conditions" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" + v1beta1conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" + v1beta1patch "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/patch" "sigs.k8s.io/cluster-api/util/patch" ) @@ -43,7 +44,7 @@ type RosaMachinePoolScopeParams struct { Cluster *clusterv1.Cluster ControlPlane *rosacontrolplanev1.ROSAControlPlane RosaMachinePool *expinfrav1.ROSAMachinePool - MachinePool *expclusterv1.MachinePool + MachinePool *clusterv1.MachinePool ControllerName string } @@ -64,7 +65,7 @@ func NewRosaMachinePoolScope(params RosaMachinePoolScopeParams) (*RosaMachinePoo params.Logger = logger.NewLogger(log) } - ammpHelper, err := patch.NewHelper(params.RosaMachinePool, params.Client) + ammpHelper, err := v1beta1patch.NewHelper(params.RosaMachinePool, params.Client) if err != nil { return nil, errors.Wrap(err, "failed to init RosaMachinePool patch helper") } @@ -103,13 +104,13 @@ var _ cloud.SessionMetadata = &RosaMachinePoolScope{} type RosaMachinePoolScope struct { logger.Logger client.Client - patchHelper *patch.Helper + patchHelper *v1beta1patch.Helper capiMachinePoolPatchHelper *patch.Helper Cluster *clusterv1.Cluster ControlPlane *rosacontrolplanev1.ROSAControlPlane RosaMachinePool *expinfrav1.ROSAMachinePool - MachinePool *expclusterv1.MachinePool + MachinePool *clusterv1.MachinePool session awsv2.Config serviceLimiters throttle.ServiceLimiters @@ -143,7 +144,7 @@ func (s *RosaMachinePoolScope) InfraCluster() cloud.ClusterObject { } // ClusterObj returns the cluster object. -func (s *RosaMachinePoolScope) ClusterObj() cloud.ClusterObject { +func (s *RosaMachinePoolScope) ClusterObj() *clusterv1.Cluster { return s.Cluster } @@ -154,7 +155,7 @@ func (s *RosaMachinePoolScope) ControllerName() string { } // GetSetter returns the condition setter for the RosaMachinePool. -func (s *RosaMachinePoolScope) GetSetter() conditions.Setter { +func (s *RosaMachinePoolScope) GetSetter() v1beta1conditions.Setter { return s.RosaMachinePool } @@ -189,11 +190,11 @@ func (s *RosaMachinePoolScope) Namespace() string { // RosaMachinePoolReadyFalse marks the ready condition false using warning if error isn't // empty. 
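// Severity sketch (reason strings illustrative): a non-empty err records a Warning-severity
// condition, an empty err an Info-severity one:
//
//	s.RosaMachinePoolReadyFalse("WaitingForControlPlane", "")       // Info
//	s.RosaMachinePoolReadyFalse("FailedCreating", "quota exceeded") // Warning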
func (s *RosaMachinePoolScope) RosaMachinePoolReadyFalse(reason string, err string) error { - severity := clusterv1.ConditionSeverityWarning + severity := clusterv1beta1.ConditionSeverityWarning if err == "" { - severity = clusterv1.ConditionSeverityInfo + severity = clusterv1beta1.ConditionSeverityInfo } - conditions.MarkFalse( + v1beta1conditions.MarkFalse( s.RosaMachinePool, expinfrav1.RosaMachinePoolReadyCondition, reason, @@ -212,7 +213,7 @@ func (s *RosaMachinePoolScope) PatchObject() error { return s.patchHelper.Patch( context.TODO(), s.RosaMachinePool, - patch.WithOwnedConditions{Conditions: []clusterv1.ConditionType{ + v1beta1patch.WithOwnedConditions{Conditions: []clusterv1beta1.ConditionType{ expinfrav1.RosaMachinePoolReadyCondition, }}) } diff --git a/pkg/cloud/scope/rosanetwork.go b/pkg/cloud/scope/rosanetwork.go new file mode 100644 index 0000000000..33613165bd --- /dev/null +++ b/pkg/cloud/scope/rosanetwork.go @@ -0,0 +1,136 @@ +/* + Copyright The Kubernetes Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package scope + +import ( + "context" + + awsv2 "github.com/aws/aws-sdk-go-v2/aws" + "github.com/pkg/errors" + "k8s.io/klog/v2" + "sigs.k8s.io/controller-runtime/pkg/client" + + infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" + expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2" + "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud" + "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/throttle" + "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + v1beta1patch "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/patch" +) + +// ROSANetworkScopeParams defines the input parameters used to create a new ROSANetworkScope. +type ROSANetworkScopeParams struct { + Client client.Client + ControllerName string + Logger *logger.Logger + ROSANetwork *expinfrav1.ROSANetwork +} + +// ROSANetworkScope defines the basic context for an actuator to operate upon. +type ROSANetworkScope struct { + logger.Logger + Client client.Client + controllerName string + patchHelper *v1beta1patch.Helper + ROSANetwork *expinfrav1.ROSANetwork + serviceLimiters throttle.ServiceLimiters + session awsv2.Config +} + +// NewROSANetworkScope creates a new ROSANetworkScope from the supplied parameters.
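// A hedged construction sketch (values illustrative):
//
//	scope, err := NewROSANetworkScope(ROSANetworkScopeParams{
//		Client:         mgr.GetClient(),
//		ControllerName: "rosanetwork",
//		ROSANetwork:    rosaNet,
//	})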
+func NewROSANetworkScope(params ROSANetworkScopeParams) (*ROSANetworkScope, error) { + if params.Logger == nil { + log := klog.Background() + params.Logger = logger.NewLogger(log) + } + + rosaNetworkScope := &ROSANetworkScope{ + Logger: *params.Logger, + Client: params.Client, + controllerName: params.ControllerName, + patchHelper: nil, + ROSANetwork: params.ROSANetwork, + } + + session, serviceLimiters, err := sessionForClusterWithRegion(params.Client, rosaNetworkScope, params.ROSANetwork.Spec.Region, params.Logger) + if err != nil { + return nil, errors.Errorf("failed to create aws V2 session: %v", err) + } + + patchHelper, err := v1beta1patch.NewHelper(params.ROSANetwork, params.Client) + if err != nil { + return nil, errors.Wrap(err, "failed to init patch helper") + } + + rosaNetworkScope.patchHelper = patchHelper + rosaNetworkScope.session = *session + rosaNetworkScope.serviceLimiters = serviceLimiters + + return rosaNetworkScope, nil +} + +// Session returns the AWS SDK V2 Config. Used for creating clients. +func (s *ROSANetworkScope) Session() awsv2.Config { + return s.session +} + +// IdentityRef returns the AWSIdentityReference object. +func (s *ROSANetworkScope) IdentityRef() *infrav1.AWSIdentityReference { + return s.ROSANetwork.Spec.IdentityRef +} + +// ServiceLimiter returns the AWS SDK session (used for creating clients). +func (s *ROSANetworkScope) ServiceLimiter(service string) *throttle.ServiceLimiter { + if sl, ok := s.serviceLimiters[service]; ok { + return sl + } + return nil +} + +// ControllerName returns the name of the controller. +func (s *ROSANetworkScope) ControllerName() string { + return s.controllerName +} + +// InfraCluster returns the ROSANetwork object. +// The method is then used in session.go to set proper Conditions for the ROSANetwork object. +func (s *ROSANetworkScope) InfraCluster() cloud.ClusterObject { + return s.ROSANetwork +} + +// InfraClusterName returns the name of the ROSANetwork object. +// The method is then used in session.go to set the key to the AWS session cache. +func (s *ROSANetworkScope) InfraClusterName() string { + return s.ROSANetwork.Name +} + +// Namespace returns the namespace of the ROSANetwork object. +// The method is then used in session.go to set the key to the AWS session cache. +func (s *ROSANetworkScope) Namespace() string { + return s.ROSANetwork.Namespace +} + +// PatchObject persists the rosanetwork configuration and status. +func (s *ROSANetworkScope) PatchObject() error { + return s.patchHelper.Patch( + context.TODO(), + s.ROSANetwork, + v1beta1patch.WithOwnedConditions{Conditions: []clusterv1beta1.ConditionType{ + expinfrav1.ROSANetworkReadyCondition, + }}) +} diff --git a/pkg/cloud/scope/rosanetwork_test.go b/pkg/cloud/scope/rosanetwork_test.go new file mode 100644 index 0000000000..d75531d8cf --- /dev/null +++ b/pkg/cloud/scope/rosanetwork_test.go @@ -0,0 +1,122 @@ +/* + Copyright The Kubernetes Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package scope + +import ( + "testing" + + . 
"github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/klog/v2" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + + infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" + expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2" + "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger" + "sigs.k8s.io/cluster-api-provider-aws/v2/util/system" +) + +func TestNewROSANetworkScope(t *testing.T) { + g := NewGomegaWithT(t) + + scheme := runtime.NewScheme() + corev1.AddToScheme(scheme) + infrav1.AddToScheme(scheme) + expinfrav1.AddToScheme(scheme) + + clusterControllerIdentity := &infrav1.AWSClusterControllerIdentity{ + TypeMeta: metav1.TypeMeta{ + Kind: string(infrav1.ControllerIdentityKind), + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "default", + }, + Spec: infrav1.AWSClusterControllerIdentitySpec{ + AWSClusterIdentitySpec: infrav1.AWSClusterIdentitySpec{ + AllowedNamespaces: &infrav1.AllowedNamespaces{}, + }, + }, + } + + staticSecret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "static-secret", + Namespace: system.GetManagerNamespace(), + }, + Data: map[string][]byte{ + "AccessKeyID": []byte("access-key-id"), + "SecretAccessKey": []byte("secret-access-key"), + }, + } + + clusterStaticIdentity := &infrav1.AWSClusterStaticIdentity{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cluster-static-identity", + }, + Spec: infrav1.AWSClusterStaticIdentitySpec{ + SecretRef: "static-secret", + AWSClusterIdentitySpec: infrav1.AWSClusterIdentitySpec{ + AllowedNamespaces: &infrav1.AllowedNamespaces{}, + }, + }, + } + + fakeClient := fake.NewClientBuilder().WithScheme(scheme).WithObjects(clusterControllerIdentity, staticSecret, clusterStaticIdentity).Build() + + rosaNetwork := expinfrav1.ROSANetwork{ + TypeMeta: metav1.TypeMeta{ + Kind: "ROSANetwork", + APIVersion: "v1beta2", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test-rosa-net", + Namespace: "test-namespace", + }, + Spec: expinfrav1.ROSANetworkSpec{ + IdentityRef: &infrav1.AWSIdentityReference{ + Name: "default", + Kind: "AWSClusterControllerIdentity", + }, + }, + Status: expinfrav1.ROSANetworkStatus{}, + } + + rosaNetScopeParams := ROSANetworkScopeParams{ + Client: fakeClient, + ControllerName: "test-rosanet-controller", + Logger: logger.NewLogger(klog.Background()), + ROSANetwork: &rosaNetwork, + } + + rosaNetScope, err := NewROSANetworkScope(rosaNetScopeParams) + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(rosaNetScope.ControllerName()).To(Equal("test-rosanet-controller")) + g.Expect(rosaNetScope.InfraCluster()).To(Equal(&rosaNetwork)) + g.Expect(rosaNetScope.InfraClusterName()).To(Equal("test-rosa-net")) + g.Expect(rosaNetScope.Namespace()).To(Equal("test-namespace")) + g.Expect(rosaNetScope.IdentityRef()).To(Equal(rosaNetwork.Spec.IdentityRef)) + g.Expect(rosaNetScope.Session()).ToNot(BeNil()) + + // AWSClusterStaticIdentity + rosaNetwork.Spec.IdentityRef.Name = "cluster-static-identity" + rosaNetwork.Spec.IdentityRef.Kind = "AWSClusterStaticIdentity" + rosaNetScope, err = NewROSANetworkScope(rosaNetScopeParams) + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(rosaNetScope.Session()).ToNot(BeNil()) +} diff --git a/pkg/cloud/scope/rosaroleconfig.go b/pkg/cloud/scope/rosaroleconfig.go new file mode 100644 index 0000000000..1ee4c0d2c3 --- /dev/null +++ b/pkg/cloud/scope/rosaroleconfig.go @@ -0,0 +1,164 @@ +/* + Copyright 2025 The Kubernetes Authors. 
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package scope
+
+import (
+	"context"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/aws-sdk-go-v2/service/iam"
+	"github.com/pkg/errors"
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/klog/v2"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+
+	infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
+	expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2"
+	"sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud"
+	"sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/throttle"
+	"sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger"
+	v1beta1patch "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/patch"
+)
+
+// RosaRoleConfigScopeParams defines the input parameters used to create a new RosaRoleConfigScope.
+type RosaRoleConfigScopeParams struct {
+	Client         client.Client
+	ControllerName string
+	Logger         *logger.Logger
+	RosaRoleConfig *expinfrav1.ROSARoleConfig
+}
+
+// RosaRoleConfigScope defines the basic context for an actuator to operate upon.
+type RosaRoleConfigScope struct {
+	logger.Logger
+	Client          client.Client
+	controllerName  string
+	patchHelper     *v1beta1patch.Helper
+	RosaRoleConfig  *expinfrav1.ROSARoleConfig
+	serviceLimiters throttle.ServiceLimiters
+	session         aws.Config
+	iamClient       *iam.Client
+}
+
+// NewRosaRoleConfigScope creates a new RosaRoleConfigScope from the supplied parameters.
+func NewRosaRoleConfigScope(params RosaRoleConfigScopeParams) (*RosaRoleConfigScope, error) {
+	if params.Logger == nil {
+		log := klog.Background()
+		params.Logger = logger.NewLogger(log)
+	}
+
+	rosaRoleConfigScope := &RosaRoleConfigScope{
+		Logger:         *params.Logger,
+		Client:         params.Client,
+		controllerName: params.ControllerName,
+		patchHelper:    nil,
+		RosaRoleConfig: params.RosaRoleConfig,
+	}
+
+	session, serviceLimiters, err := sessionForClusterWithRegion(params.Client, rosaRoleConfigScope, "", params.Logger)
+	if err != nil {
+		return nil, errors.Errorf("failed to create AWS V2 session: %v", err)
+	}
+
+	iamClient := iam.NewFromConfig(*session)
+
+	patchHelper, err := v1beta1patch.NewHelper(params.RosaRoleConfig, params.Client)
+	if err != nil {
+		return nil, errors.Wrap(err, "failed to init patch helper")
+	}
+
+	rosaRoleConfigScope.patchHelper = patchHelper
+	rosaRoleConfigScope.session = *session
+	rosaRoleConfigScope.serviceLimiters = serviceLimiters
+	rosaRoleConfigScope.iamClient = iamClient
+
+	return rosaRoleConfigScope, nil
+}
+
+// IdentityRef returns the AWSIdentityReference object.
+func (s *RosaRoleConfigScope) IdentityRef() *infrav1.AWSIdentityReference {
+	return s.RosaRoleConfig.Spec.IdentityRef
+}
+
+// Session returns the AWS SDK V2 Config. Used for creating clients.
+func (s *RosaRoleConfigScope) Session() aws.Config {
+	return s.session
+}
+
+// ServiceLimiter returns the throttle.ServiceLimiter for the requested AWS service.
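+// It returns nil when no limiter has been configured for that service.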
+func (s *RosaRoleConfigScope) ServiceLimiter(service string) *throttle.ServiceLimiter {
+	if sl, ok := s.serviceLimiters[service]; ok {
+		return sl
+	}
+	return nil
+}
+
+// ControllerName returns the name of the controller.
+func (s *RosaRoleConfigScope) ControllerName() string {
+	return s.controllerName
+}
+
+// InfraCluster returns the RosaRoleConfig object.
+// The method is used in session.go to set proper Conditions for the RosaRoleConfig object.
+func (s *RosaRoleConfigScope) InfraCluster() cloud.ClusterObject {
+	return s.RosaRoleConfig
+}
+
+// InfraClusterName returns the name of the RosaRoleConfig object.
+// The method is used in session.go to set the key to the AWS session cache.
+func (s *RosaRoleConfigScope) InfraClusterName() string {
+	return s.RosaRoleConfig.Name
+}
+
+// Namespace returns the namespace of the RosaRoleConfig object.
+// The method is used in session.go to set the key to the AWS session cache.
+func (s *RosaRoleConfigScope) Namespace() string {
+	return s.RosaRoleConfig.Namespace
+}
+
+// GetClient returns the RosaRoleConfigScope client.
+func (s *RosaRoleConfigScope) GetClient() client.Client {
+	return s.Client
+}
+
+// PatchObject persists the RosaRoleConfig configuration and status.
+func (s *RosaRoleConfigScope) PatchObject() error {
+	return s.patchHelper.Patch(
+		context.Background(),
+		s.RosaRoleConfig)
+}
+
+// CredentialsSecret returns the CredentialsSecret object.
+func (s *RosaRoleConfigScope) CredentialsSecret() *corev1.Secret {
+	secretRef := s.RosaRoleConfig.Spec.CredentialsSecretRef
+	if secretRef == nil {
+		return nil
+	}
+
+	return &corev1.Secret{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      secretRef.Name,
+			Namespace: s.RosaRoleConfig.Namespace,
+		},
+	}
+}
+
+// IAMClient returns the IAM client.
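+// The client is created once from the scope's AWS config in NewRosaRoleConfigScope and reused for all IAM calls.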
+func (s *RosaRoleConfigScope) IAMClient() *iam.Client { + return s.iamClient +} diff --git a/pkg/cloud/scope/session.go b/pkg/cloud/scope/session.go index ae6ff23244..541ecc97bb 100644 --- a/pkg/cloud/scope/session.go +++ b/pkg/cloud/scope/session.go @@ -40,18 +40,20 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/throttle" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger" "sigs.k8s.io/cluster-api-provider-aws/v2/util/system" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" "sigs.k8s.io/cluster-api/util" - "sigs.k8s.io/cluster-api/util/conditions" - "sigs.k8s.io/cluster-api/util/patch" + v1beta1conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" + v1beta1patch "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/patch" ) const ( notPermittedError = "Namespace is not permitted to use %s: %s" ) -var sessionCache sync.Map -var providerCache sync.Map +var ( + sessionCache sync.Map + providerCache sync.Map +) type sessionCacheEntry struct { session *aws.Config @@ -71,7 +73,6 @@ func sessionForRegion(region string) (*aws.Config, throttle.ServiceLimiters, err } ns, err := config.LoadDefaultConfig(context.Background(), config.WithRegion(region)) - if err != nil { return nil, nil, err } @@ -91,7 +92,7 @@ func sessionForClusterWithRegion(k8sClient client.Client, clusterScoper cloud.Se providers, err := getProvidersForCluster(context.Background(), k8sClient, clusterScoper, region, log) if err != nil { // could not get providers and retrieve the credentials - conditions.MarkFalse(clusterScoper.InfraCluster(), infrav1.PrincipalCredentialRetrievedCondition, infrav1.PrincipalCredentialRetrievalFailedReason, clusterv1.ConditionSeverityError, "%s", err.Error()) + v1beta1conditions.MarkFalse(clusterScoper.InfraCluster(), infrav1.PrincipalCredentialRetrievedCondition, infrav1.PrincipalCredentialRetrievalFailedReason, clusterv1beta1.ConditionSeverityError, "%s", err.Error()) return nil, nil, errors.Wrap(err, "Failed to get providers for cluster") } @@ -129,7 +130,7 @@ func sessionForClusterWithRegion(k8sClient client.Client, clusterScoper cloud.Se // Check if identity credentials can be retrieved. One reason this will fail is that source identity is not authorized for assume role. _, err := providers[0].Retrieve(context.Background()) if err != nil { - conditions.MarkUnknown(clusterScoper.InfraCluster(), infrav1.PrincipalCredentialRetrievedCondition, infrav1.CredentialProviderBuildFailedReason, "%s", err.Error()) + v1beta1conditions.MarkUnknown(clusterScoper.InfraCluster(), infrav1.PrincipalCredentialRetrievedCondition, infrav1.CredentialProviderBuildFailedReason, "%s", err.Error()) // delete the existing session from cache. Otherwise, we give back a defective session on next method invocation with same cluster scope sessionCache.Delete(getSessionName(region, clusterScoper)) @@ -140,7 +141,7 @@ func sessionForClusterWithRegion(k8sClient client.Client, clusterScoper cloud.Se optFns = append(optFns, config.WithCredentialsProvider(chainProvider)) } - conditions.MarkTrue(clusterScoper.InfraCluster(), infrav1.PrincipalCredentialRetrievedCondition) + v1beta1conditions.MarkTrue(clusterScoper.InfraCluster(), infrav1.PrincipalCredentialRetrievedCondition) ns, err := config.LoadDefaultConfig(context.Background(), optFns...) 
if err != nil { @@ -156,7 +157,7 @@ func sessionForClusterWithRegion(k8sClient client.Client, clusterScoper cloud.Se } func getSessionName(region string, clusterScoper cloud.SessionMetadata) string { - return fmt.Sprintf("%s-%s-%s", region, clusterScoper.InfraClusterName(), clusterScoper.Namespace()) + return fmt.Sprintf("%s-%s-%s-%s", region, clusterScoper.ControllerName(), clusterScoper.InfraClusterName(), clusterScoper.Namespace()) } func newServiceLimiters() throttle.ServiceLimiters { @@ -226,7 +227,8 @@ func buildProvidersForRef( clusterScoper cloud.SessionMetadata, ref *infrav1.AWSIdentityReference, region string, - log logger.Wrapper) ([]identity.AWSPrincipalTypeProvider, error) { + log logger.Wrapper, +) ([]identity.AWSPrincipalTypeProvider, error) { if ref == nil { log.Trace("AWSCluster does not have a IdentityRef specified") return providers, nil @@ -288,21 +290,21 @@ func buildProvidersForRef( default: return providers, errors.Errorf("No such provider known: '%s'", ref.Kind) } - conditions.MarkTrue(clusterScoper.InfraCluster(), infrav1.PrincipalUsageAllowedCondition) + v1beta1conditions.MarkTrue(clusterScoper.InfraCluster(), infrav1.PrincipalUsageAllowedCondition) return providers, nil } func setPrincipalUsageAllowedCondition(clusterScoper cloud.SessionMetadata) { - conditions.MarkTrue(clusterScoper.InfraCluster(), infrav1.PrincipalUsageAllowedCondition) + v1beta1conditions.MarkTrue(clusterScoper.InfraCluster(), infrav1.PrincipalUsageAllowedCondition) } func setPrincipalUsageNotAllowedCondition(kind infrav1.AWSIdentityKind, identityObjectKey client.ObjectKey, clusterScoper cloud.SessionMetadata) { errMsg := fmt.Sprintf(notPermittedError, kind, identityObjectKey.Name) if clusterScoper.IdentityRef().Name == identityObjectKey.Name { - conditions.MarkFalse(clusterScoper.InfraCluster(), infrav1.PrincipalUsageAllowedCondition, infrav1.PrincipalUsageUnauthorizedReason, clusterv1.ConditionSeverityError, "%s", errMsg) + v1beta1conditions.MarkFalse(clusterScoper.InfraCluster(), infrav1.PrincipalUsageAllowedCondition, infrav1.PrincipalUsageUnauthorizedReason, clusterv1beta1.ConditionSeverityError, "%s", errMsg) } else { - conditions.MarkFalse(clusterScoper.InfraCluster(), infrav1.PrincipalUsageAllowedCondition, infrav1.SourcePrincipalUsageUnauthorizedReason, clusterv1.ConditionSeverityError, "%s", errMsg) + v1beta1conditions.MarkFalse(clusterScoper.InfraCluster(), infrav1.PrincipalUsageAllowedCondition, infrav1.SourcePrincipalUsageUnauthorizedReason, clusterv1beta1.ConditionSeverityError, "%s", errMsg) } } @@ -319,7 +321,7 @@ func buildAWSClusterStaticIdentity(ctx context.Context, identityObjectKey client } // Set ClusterStaticPrincipal as Secret's owner reference for 'clusterctl move'. 
- patchHelper, err := patch.NewHelper(secret, k8sClient) + patchHelper, err := v1beta1patch.NewHelper(secret, k8sClient) if err != nil { return nil, errors.Wrapf(err, "failed to init patch helper for secret name:%s namespace:%s", secret.Name, secret.Namespace) } diff --git a/pkg/cloud/scope/session_test.go b/pkg/cloud/scope/session_test.go index 1035ca6562..39dc491d84 100644 --- a/pkg/cloud/scope/session_test.go +++ b/pkg/cloud/scope/session_test.go @@ -34,7 +34,7 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/identity" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger" "sigs.k8s.io/cluster-api-provider-aws/v2/util/system" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" ) func TestIsClusterPermittedToUsePrincipal(t *testing.T) { diff --git a/pkg/cloud/scope/sg.go b/pkg/cloud/scope/sg.go index 05409d835c..fd792ccb52 100644 --- a/pkg/cloud/scope/sg.go +++ b/pkg/cloud/scope/sg.go @@ -44,6 +44,7 @@ type SGScope interface { Bastion() *infrav1.Bastion // ControlPlaneLoadBalancer returns the load balancer settings that are requested. + // // Deprecated: Use ControlPlaneLoadBalancers() ControlPlaneLoadBalancer() *infrav1.AWSLoadBalancerSpec diff --git a/pkg/cloud/scope/shared.go b/pkg/cloud/scope/shared.go index cde09c9dff..099b112db0 100644 --- a/pkg/cloud/scope/shared.go +++ b/pkg/cloud/scope/shared.go @@ -27,7 +27,7 @@ import ( infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/cluster-api/controllers/external" ) @@ -136,15 +136,15 @@ func (p *defaultSubnetPlacementStrategy) getSubnetsForAZs(azs []string, controlP // getUnstructuredControlPlane returns the unstructured object for the control plane, if any. // When the reference is not set, it returns an empty object. func getUnstructuredControlPlane(ctx context.Context, client client.Client, cluster *clusterv1.Cluster) (*unstructured.Unstructured, error) { - if cluster.Spec.ControlPlaneRef == nil { + if !cluster.Spec.ControlPlaneRef.IsDefined() { // If the control plane ref is not set, return an empty object. // Not having a control plane ref is valid given API contracts. return &unstructured.Unstructured{}, nil } - u, err := external.Get(ctx, client, cluster.Spec.ControlPlaneRef) + u, err := external.GetObjectFromContractVersionedRef(ctx, client, cluster.Spec.ControlPlaneRef, cluster.Namespace) if err != nil { - return nil, errors.Wrapf(err, "failed to retrieve control plane object %s/%s", cluster.Spec.ControlPlaneRef.Namespace, cluster.Spec.ControlPlaneRef.Name) + return nil, errors.Wrapf(err, "failed to retrieve control plane object %s/%s", cluster.Namespace, cluster.Spec.ControlPlaneRef.Name) } return u, nil } diff --git a/pkg/cloud/services/autoscaling/autoscalinggroup.go b/pkg/cloud/services/autoscaling/autoscalinggroup.go index f09538e56f..e3355b3eb7 100644 --- a/pkg/cloud/services/autoscaling/autoscalinggroup.go +++ b/pkg/cloud/services/autoscaling/autoscalinggroup.go @@ -305,30 +305,50 @@ func (s *Service) UpdateASG(machinePoolScope *scope.MachinePoolScope) error { return nil } -// CanStartASGInstanceRefresh will start an ASG instance with refresh. 
-func (s *Service) CanStartASGInstanceRefresh(scope *scope.MachinePoolScope) (bool, error) { +// CanStartASGInstanceRefresh checks if a new ASG instance refresh can currently be started, and returns the status if there is an existing, unfinished refresh. +func (s *Service) CanStartASGInstanceRefresh(scope *scope.MachinePoolScope) (bool, *autoscalingtypes.InstanceRefreshStatus, error) { describeInput := &autoscaling.DescribeInstanceRefreshesInput{AutoScalingGroupName: aws.String(scope.Name())} refreshes, err := s.ASGClient.DescribeInstanceRefreshes(context.TODO(), describeInput) if err != nil { - return false, err - } - hasUnfinishedRefresh := false - if len(refreshes.InstanceRefreshes) != 0 { - for i := range refreshes.InstanceRefreshes { - if refreshes.InstanceRefreshes[i].Status == autoscalingtypes.InstanceRefreshStatusInProgress || - refreshes.InstanceRefreshes[i].Status == autoscalingtypes.InstanceRefreshStatusPending || - refreshes.InstanceRefreshes[i].Status == autoscalingtypes.InstanceRefreshStatusCancelling { - hasUnfinishedRefresh = true - } + return false, nil, err + } + var unfinishedRefreshStatus *autoscalingtypes.InstanceRefreshStatus + for _, refresh := range refreshes.InstanceRefreshes { + if refresh.Status == autoscalingtypes.InstanceRefreshStatusInProgress || + refresh.Status == autoscalingtypes.InstanceRefreshStatusPending || + refresh.Status == autoscalingtypes.InstanceRefreshStatusCancelling { + unfinishedRefreshStatus = &refresh.Status } } - if hasUnfinishedRefresh { - return false, nil + if unfinishedRefreshStatus != nil { + // There's an unfinished instance refresh, so no other refresh can be started right now + return false, unfinishedRefreshStatus, nil + } + return true, nil, nil +} + +// CancelASGInstanceRefresh cancels an ASG instance refresh. +func (s *Service) CancelASGInstanceRefresh(scope *scope.MachinePoolScope) error { + input := &autoscaling.CancelInstanceRefreshInput{ + AutoScalingGroupName: aws.String(scope.Name()), } - return true, nil + + if _, err := s.ASGClient.CancelInstanceRefresh(context.TODO(), input); err != nil { + smithyErr := awserrors.ParseSmithyError(err) + if smithyErr.ErrorCode() == (&autoscalingtypes.ActiveInstanceRefreshNotFoundFault{}).ErrorCode() { + // Refresh isn't "in progress". It may have turned to cancelled status + // by now. So this is not an error for us because we may have called + // CancelInstanceRefresh multiple times and should be idempotent here. + return nil + } + + return errors.Wrapf(err, "failed to cancel ASG instance refresh %q", scope.Name()) + } + + return nil } -// StartASGInstanceRefresh will start an ASG instance with refresh. +// StartASGInstanceRefresh will start an ASG instance refresh. func (s *Service) StartASGInstanceRefresh(scope *scope.MachinePoolScope) error { strategy := ptr.To(autoscalingtypes.RefreshStrategyRolling) var minHealthyPercentage, maxHealthyPercentage, instanceWarmup *int32 @@ -503,7 +523,7 @@ func mapToTags(input map[string]string, resourceID *string) []autoscalingtypes.T // SubnetIDs return subnet IDs of a AWSMachinePool based on given subnetIDs and filters. 
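+// Each subnet entry on the AWSMachinePool spec may name a subnet ID directly or describe it via filters; both forms are resolved below.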
func (s *Service) SubnetIDs(scope *scope.MachinePoolScope) ([]string, error) { subnetIDs := make([]string, 0) - var inputFilters = make([]ec2types.Filter, 0) + inputFilters := make([]ec2types.Filter, 0) for _, subnet := range scope.AWSMachinePool.Spec.Subnets { switch { diff --git a/pkg/cloud/services/autoscaling/autoscalinggroup_test.go b/pkg/cloud/services/autoscaling/autoscalinggroup_test.go index 53f1222072..392fbfc93f 100644 --- a/pkg/cloud/services/autoscaling/autoscalinggroup_test.go +++ b/pkg/cloud/services/autoscaling/autoscalinggroup_test.go @@ -42,8 +42,7 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/autoscaling/mock_autoscalingiface" "sigs.k8s.io/cluster-api-provider-aws/v2/test/mocks" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - expclusterv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" ) func TestServiceGetASGByName(t *testing.T) { @@ -1125,10 +1124,11 @@ func TestServiceCanStartASGInstanceRefresh(t *testing.T) { defer mockCtrl.Finish() tests := []struct { - name string - wantErr bool - canStart bool - expect func(m *mock_autoscalingiface.MockAutoScalingAPIMockRecorder) + name string + wantErr bool + wantUnfinishedRefreshStatus *string + canStart bool + expect func(m *mock_autoscalingiface.MockAutoScalingAPIMockRecorder) }{ { name: "should return error if describe instance refresh failed", @@ -1153,9 +1153,10 @@ func TestServiceCanStartASGInstanceRefresh(t *testing.T) { }, }, { - name: "should return false if some instances have unfinished refresh", - wantErr: false, - canStart: false, + name: "should return false if some instances have unfinished refresh", + wantErr: false, + wantUnfinishedRefreshStatus: aws.String(string(autoscalingtypes.InstanceRefreshStatusInProgress)), + canStart: false, expect: func(m *mock_autoscalingiface.MockAutoScalingAPIMockRecorder) { m.DescribeInstanceRefreshes(context.TODO(), gomock.Eq(&autoscaling.DescribeInstanceRefreshesInput{ AutoScalingGroupName: aws.String("machinePoolName"), @@ -1187,13 +1188,14 @@ func TestServiceCanStartASGInstanceRefresh(t *testing.T) { g.Expect(err).ToNot(HaveOccurred()) mps.AWSMachinePool.Name = "machinePoolName" - out, err := s.CanStartASGInstanceRefresh(mps) + out, unfinishedRefreshStatus, err := s.CanStartASGInstanceRefresh(mps) checkErr(tt.wantErr, err, g) if tt.canStart { g.Expect(out).To(BeTrue()) - return + } else { + g.Expect(out).To(BeFalse()) + g.Expect(unfinishedRefreshStatus).To(BeEquivalentTo(tt.wantUnfinishedRefreshStatus)) } - g.Expect(out).To(BeFalse()) }) } } @@ -1267,7 +1269,7 @@ func getFakeClient() client.Client { scheme := runtime.NewScheme() _ = infrav1.AddToScheme(scheme) _ = expinfrav1.AddToScheme(scheme) - _ = expclusterv1.AddToScheme(scheme) + _ = clusterv1.AddToScheme(scheme) return fake.NewClientBuilder().WithScheme(scheme).Build() } @@ -1350,7 +1352,7 @@ func getMachinePoolScope(client client.Client, clusterScope *scope.ClusterScope) mps, err := scope.NewMachinePoolScope(scope.MachinePoolScopeParams{ Client: client, Cluster: clusterScope.Cluster, - MachinePool: &expclusterv1.MachinePool{}, + MachinePool: &clusterv1.MachinePool{}, InfraCluster: clusterScope, AWSMachinePool: awsMachinePool, }) diff --git a/pkg/cloud/services/autoscaling/lifecyclehook.go b/pkg/cloud/services/autoscaling/lifecyclehook.go index 293070fab1..a494a4a242 100644 --- a/pkg/cloud/services/autoscaling/lifecyclehook.go +++ b/pkg/cloud/services/autoscaling/lifecyclehook.go @@ 
-30,8 +30,8 @@ import ( expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - "sigs.k8s.io/cluster-api/util/conditions" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" + deprecatedv1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1" ) // DescribeLifecycleHooks returns the lifecycle hooks for the given AutoScalingGroup after retrieving them from the AWS API. @@ -160,7 +160,7 @@ func getLifecycleHookSpecificationList(lifecycleHooks []expinfrav1.AWSLifecycleH // by creating missing hooks, updating mismatching hooks and // deleting extraneous hooks (except those specified in // ignoreLifecycleHooks). -func ReconcileLifecycleHooks(ctx context.Context, asgService services.ASGInterface, asgName string, wantedLifecycleHooks []expinfrav1.AWSLifecycleHook, ignoreLifecycleHooks map[string]bool, storeConditionsOnObject conditions.Setter, log logger.Wrapper) error { +func ReconcileLifecycleHooks(ctx context.Context, asgService services.ASGInterface, asgName string, wantedLifecycleHooks []expinfrav1.AWSLifecycleHook, ignoreLifecycleHooks map[string]bool, storeConditionsOnObject deprecatedv1beta1conditions.Setter, log logger.Wrapper) error { existingHooks, err := asgService.DescribeLifecycleHooks(asgName) if err != nil { return err @@ -191,7 +191,7 @@ func ReconcileLifecycleHooks(ctx context.Context, asgService services.ASGInterfa if !found { log.Info("Deleting extraneous lifecycle hook", "hook", existingHook.Name) if err := asgService.DeleteLifecycleHook(ctx, asgName, existingHook); err != nil { - conditions.MarkFalse(storeConditionsOnObject, expinfrav1.LifecycleHookReadyCondition, expinfrav1.LifecycleHookDeletionFailedReason, clusterv1.ConditionSeverityError, "%s", err.Error()) + deprecatedv1beta1conditions.MarkFalse(storeConditionsOnObject, clusterv1.ConditionType(expinfrav1.LifecycleHookReadyCondition), expinfrav1.LifecycleHookDeletionFailedReason, clusterv1.ConditionSeverityError, "%s", err.Error()) return err } } @@ -204,11 +204,12 @@ func lifecycleHookNeedsUpdate(existing *expinfrav1.AWSLifecycleHook, expected *e return ptr.Deref(existing.DefaultResult, expinfrav1.LifecycleHookDefaultResultAbandon) != ptr.Deref(expected.DefaultResult, expinfrav1.LifecycleHookDefaultResultAbandon) || ptr.Deref(existing.HeartbeatTimeout, metav1.Duration{Duration: 3600 * time.Second}) != ptr.Deref(expected.HeartbeatTimeout, metav1.Duration{Duration: 3600 * time.Second}) || existing.LifecycleTransition != expected.LifecycleTransition || - existing.NotificationTargetARN != expected.NotificationTargetARN || - existing.NotificationMetadata != expected.NotificationMetadata + ptr.Deref(existing.NotificationTargetARN, "") != ptr.Deref(expected.NotificationTargetARN, "") || + ptr.Deref(existing.RoleARN, "") != ptr.Deref(expected.RoleARN, "") || + ptr.Deref(existing.NotificationMetadata, "") != ptr.Deref(expected.NotificationMetadata, "") } -func reconcileLifecycleHook(ctx context.Context, asgService services.ASGInterface, asgName string, wantedHook *expinfrav1.AWSLifecycleHook, existingHooks []*expinfrav1.AWSLifecycleHook, storeConditionsOnObject conditions.Setter, log logger.Wrapper) error { +func reconcileLifecycleHook(ctx context.Context, asgService services.ASGInterface, asgName string, wantedHook *expinfrav1.AWSLifecycleHook, existingHooks []*expinfrav1.AWSLifecycleHook, storeConditionsOnObject 
deprecatedv1beta1conditions.Setter, log logger.Wrapper) error { log = log.WithValues("hook", wantedHook.Name) log.Info("Checking for existing lifecycle hook") @@ -223,7 +224,7 @@ func reconcileLifecycleHook(ctx context.Context, asgService services.ASGInterfac if existingHook == nil { log.Info("Creating lifecycle hook") if err := asgService.CreateLifecycleHook(ctx, asgName, wantedHook); err != nil { - conditions.MarkFalse(storeConditionsOnObject, expinfrav1.LifecycleHookReadyCondition, expinfrav1.LifecycleHookCreationFailedReason, clusterv1.ConditionSeverityError, "%s", err.Error()) + deprecatedv1beta1conditions.MarkFalse(storeConditionsOnObject, clusterv1.ConditionType(expinfrav1.LifecycleHookReadyCondition), expinfrav1.LifecycleHookCreationFailedReason, clusterv1.ConditionSeverityError, "%s", err.Error()) return err } return nil @@ -232,11 +233,11 @@ func reconcileLifecycleHook(ctx context.Context, asgService services.ASGInterfac if lifecycleHookNeedsUpdate(existingHook, wantedHook) { log.Info("Updating lifecycle hook") if err := asgService.UpdateLifecycleHook(ctx, asgName, wantedHook); err != nil { - conditions.MarkFalse(storeConditionsOnObject, expinfrav1.LifecycleHookReadyCondition, expinfrav1.LifecycleHookUpdateFailedReason, clusterv1.ConditionSeverityError, "%s", err.Error()) + deprecatedv1beta1conditions.MarkFalse(storeConditionsOnObject, clusterv1.ConditionType(expinfrav1.LifecycleHookReadyCondition), expinfrav1.LifecycleHookUpdateFailedReason, clusterv1.ConditionSeverityError, "%s", err.Error()) return err } } - conditions.MarkTrue(storeConditionsOnObject, expinfrav1.LifecycleHookReadyCondition) + deprecatedv1beta1conditions.MarkTrue(storeConditionsOnObject, clusterv1.ConditionType(expinfrav1.LifecycleHookReadyCondition)) return nil } diff --git a/pkg/cloud/services/autoscaling/lifecyclehook_test.go b/pkg/cloud/services/autoscaling/lifecyclehook_test.go index 5aba913f64..865d3007b1 100644 --- a/pkg/cloud/services/autoscaling/lifecyclehook_test.go +++ b/pkg/cloud/services/autoscaling/lifecyclehook_test.go @@ -22,6 +22,7 @@ import ( . 
"github.com/onsi/gomega" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/utils/ptr" expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2" ) @@ -53,6 +54,29 @@ func TestLifecycleHookNeedsUpdate(t *testing.T) { wantUpdate: false, }, + { + name: "exactly equal (all fields filled)", + existing: expinfrav1.AWSLifecycleHook{ + Name: "test", + NotificationTargetARN: ptr.To("arn:aws:sqs:eu-west-1:123456789012:mycluster-nth"), + RoleARN: ptr.To("arn:aws:iam::123456789012:role/mycluster-nth-notification"), + LifecycleTransition: "autoscaling:EC2_INSTANCE_TERMINATING", + HeartbeatTimeout: &metav1.Duration{Duration: 3600 * time.Second}, + DefaultResult: &defaultResultAbandon, + NotificationMetadata: nil, + }, + expected: expinfrav1.AWSLifecycleHook{ + Name: "test", + NotificationTargetARN: ptr.To("arn:aws:sqs:eu-west-1:123456789012:mycluster-nth"), + RoleARN: ptr.To("arn:aws:iam::123456789012:role/mycluster-nth-notification"), + LifecycleTransition: "autoscaling:EC2_INSTANCE_TERMINATING", + HeartbeatTimeout: &metav1.Duration{Duration: 3600 * time.Second}, + DefaultResult: &defaultResultAbandon, + NotificationMetadata: nil, + }, + wantUpdate: false, + }, + { name: "heartbeatTimeout and defaultResult not set in manifest, but set to defaults by AWS", existing: expinfrav1.AWSLifecycleHook{ @@ -119,6 +143,90 @@ func TestLifecycleHookNeedsUpdate(t *testing.T) { }, wantUpdate: true, }, + + { + name: "role ARN differs", + existing: expinfrav1.AWSLifecycleHook{ + Name: "test", + NotificationTargetARN: ptr.To("arn:aws:sqs:eu-west-1:123456789012:mycluster-nth"), + RoleARN: ptr.To("arn:aws:iam::123456789012:role/mycluster-nth-notification"), + LifecycleTransition: "autoscaling:EC2_INSTANCE_TERMINATING", + HeartbeatTimeout: &metav1.Duration{Duration: 3600 * time.Second}, + DefaultResult: &defaultResultAbandon, + NotificationMetadata: nil, + }, + expected: expinfrav1.AWSLifecycleHook{ + Name: "test", + NotificationTargetARN: ptr.To("arn:aws:sqs:eu-west-1:123456789012:mycluster-nth"), + RoleARN: ptr.To("arn:aws:iam::123456789012:role/mycluster-nth-notification2"), + LifecycleTransition: "autoscaling:EC2_INSTANCE_TERMINATING", + HeartbeatTimeout: &metav1.Duration{Duration: 3600 * time.Second}, + DefaultResult: &defaultResultAbandon, + NotificationMetadata: nil, + }, + wantUpdate: true, + }, + + { + name: "notification target ARN differs", + existing: expinfrav1.AWSLifecycleHook{ + Name: "test", + NotificationTargetARN: ptr.To("arn:aws:sqs:eu-west-1:123456789012:mycluster-nth"), + RoleARN: ptr.To("arn:aws:iam::123456789012:role/mycluster-nth-notification"), + LifecycleTransition: "autoscaling:EC2_INSTANCE_TERMINATING", + HeartbeatTimeout: &metav1.Duration{Duration: 3600 * time.Second}, + DefaultResult: &defaultResultAbandon, + NotificationMetadata: nil, + }, + expected: expinfrav1.AWSLifecycleHook{ + Name: "test", + NotificationTargetARN: ptr.To("arn:aws:sqs:eu-west-1:123456789012:mycluster-nth2"), + RoleARN: ptr.To("arn:aws:iam::123456789012:role/mycluster-nth-notification"), + LifecycleTransition: "autoscaling:EC2_INSTANCE_TERMINATING", + HeartbeatTimeout: &metav1.Duration{Duration: 3600 * time.Second}, + DefaultResult: &defaultResultAbandon, + NotificationMetadata: nil, + }, + wantUpdate: true, + }, + + { + name: "notification metadata both empty", + existing: expinfrav1.AWSLifecycleHook{ + Name: "test", + LifecycleTransition: "autoscaling:EC2_INSTANCE_TERMINATING", + HeartbeatTimeout: &metav1.Duration{Duration: 3600 * time.Second}, + DefaultResult: &defaultResultAbandon, + 
NotificationMetadata: nil, + }, + expected: expinfrav1.AWSLifecycleHook{ + Name: "test", + LifecycleTransition: "autoscaling:EC2_INSTANCE_TERMINATING", + HeartbeatTimeout: &metav1.Duration{Duration: 3600 * time.Second}, + DefaultResult: &defaultResultAbandon, + NotificationMetadata: ptr.To(""), + }, + wantUpdate: false, + }, + + { + name: "notification metadata differs", + existing: expinfrav1.AWSLifecycleHook{ + Name: "test", + LifecycleTransition: "autoscaling:EC2_INSTANCE_TERMINATING", + HeartbeatTimeout: &metav1.Duration{Duration: 3600 * time.Second}, + DefaultResult: &defaultResultAbandon, + NotificationMetadata: ptr.To("abc"), + }, + expected: expinfrav1.AWSLifecycleHook{ + Name: "test", + LifecycleTransition: "autoscaling:EC2_INSTANCE_TERMINATING", + HeartbeatTimeout: &metav1.Duration{Duration: 3600 * time.Second}, + DefaultResult: &defaultResultAbandon, + NotificationMetadata: ptr.To("xyz"), + }, + wantUpdate: true, + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { diff --git a/pkg/cloud/services/autoscaling/mock_autoscalingiface/autoscaling_mock.go b/pkg/cloud/services/autoscaling/mock_autoscalingiface/autoscaling_mock.go index 2f03a558db..b589873c98 100644 --- a/pkg/cloud/services/autoscaling/mock_autoscalingiface/autoscaling_mock.go +++ b/pkg/cloud/services/autoscaling/mock_autoscalingiface/autoscaling_mock.go @@ -51,6 +51,26 @@ func (m *MockAutoScalingAPI) EXPECT() *MockAutoScalingAPIMockRecorder { return m.recorder } +// CancelInstanceRefresh mocks base method. +func (m *MockAutoScalingAPI) CancelInstanceRefresh(arg0 context.Context, arg1 *autoscaling.CancelInstanceRefreshInput, arg2 ...func(*autoscaling.Options)) (*autoscaling.CancelInstanceRefreshOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "CancelInstanceRefresh", varargs...) + ret0, _ := ret[0].(*autoscaling.CancelInstanceRefreshOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CancelInstanceRefresh indicates an expected call of CancelInstanceRefresh. +func (mr *MockAutoScalingAPIMockRecorder) CancelInstanceRefresh(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CancelInstanceRefresh", reflect.TypeOf((*MockAutoScalingAPI)(nil).CancelInstanceRefresh), varargs...) +} + // CreateAutoScalingGroup mocks base method. func (m *MockAutoScalingAPI) CreateAutoScalingGroup(arg0 context.Context, arg1 *autoscaling.CreateAutoScalingGroupInput, arg2 ...func(*autoscaling.Options)) (*autoscaling.CreateAutoScalingGroupOutput, error) { m.ctrl.T.Helper() diff --git a/pkg/cloud/services/autoscaling/service.go b/pkg/cloud/services/autoscaling/service.go index 3c1591af22..d88c762ee4 100644 --- a/pkg/cloud/services/autoscaling/service.go +++ b/pkg/cloud/services/autoscaling/service.go @@ -37,6 +37,7 @@ type Service struct { // AutoScalingAPI is an interface for the AWS AutoScaling API client. 
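+// CancelInstanceRefresh was added to the interface alongside the new CancelASGInstanceRefresh service method so that unfinished instance refreshes can be cancelled.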
type AutoScalingAPI interface { + CancelInstanceRefresh(ctx context.Context, params *autoscaling.CancelInstanceRefreshInput, optFns ...func(*autoscaling.Options)) (*autoscaling.CancelInstanceRefreshOutput, error) CreateAutoScalingGroup(ctx context.Context, params *autoscaling.CreateAutoScalingGroupInput, optFns ...func(*autoscaling.Options)) (*autoscaling.CreateAutoScalingGroupOutput, error) DeleteAutoScalingGroup(ctx context.Context, params *autoscaling.DeleteAutoScalingGroupInput, optFns ...func(*autoscaling.Options)) (*autoscaling.DeleteAutoScalingGroupOutput, error) DescribeAutoScalingGroups(ctx context.Context, params *autoscaling.DescribeAutoScalingGroupsInput, optFns ...func(*autoscaling.Options)) (*autoscaling.DescribeAutoScalingGroupsOutput, error) diff --git a/pkg/cloud/services/common/common.go b/pkg/cloud/services/common/common.go index 3561534647..76ffdb2333 100644 --- a/pkg/cloud/services/common/common.go +++ b/pkg/cloud/services/common/common.go @@ -26,6 +26,7 @@ import ( // EC2API defines the EC2 API interface. type EC2API interface { AllocateAddress(ctx context.Context, params *ec2.AllocateAddressInput, optFns ...func(*ec2.Options)) (*ec2.AllocateAddressOutput, error) + AllocateHosts(ctx context.Context, params *ec2.AllocateHostsInput, optFns ...func(*ec2.Options)) (*ec2.AllocateHostsOutput, error) AssociateAddress(ctx context.Context, params *ec2.AssociateAddressInput, optFns ...func(*ec2.Options)) (*ec2.AssociateAddressOutput, error) AssociateRouteTable(ctx context.Context, params *ec2.AssociateRouteTableInput, optFns ...func(*ec2.Options)) (*ec2.AssociateRouteTableOutput, error) AssociateVpcCidrBlock(ctx context.Context, params *ec2.AssociateVpcCidrBlockInput, optFns ...func(*ec2.Options)) (*ec2.AssociateVpcCidrBlockOutput, error) @@ -61,6 +62,7 @@ type EC2API interface { DescribeCarrierGateways(ctx context.Context, params *ec2.DescribeCarrierGatewaysInput, optFns ...func(*ec2.Options)) (*ec2.DescribeCarrierGatewaysOutput, error) DescribeDhcpOptions(ctx context.Context, params *ec2.DescribeDhcpOptionsInput, optFns ...func(*ec2.Options)) (*ec2.DescribeDhcpOptionsOutput, error) DescribeEgressOnlyInternetGateways(ctx context.Context, params *ec2.DescribeEgressOnlyInternetGatewaysInput, optFns ...func(*ec2.Options)) (*ec2.DescribeEgressOnlyInternetGatewaysOutput, error) + DescribeHosts(ctx context.Context, params *ec2.DescribeHostsInput, optFns ...func(*ec2.Options)) (*ec2.DescribeHostsOutput, error) DescribeImages(ctx context.Context, params *ec2.DescribeImagesInput, optFns ...func(*ec2.Options)) (*ec2.DescribeImagesOutput, error) DescribeInstances(ctx context.Context, params *ec2.DescribeInstancesInput, optFns ...func(*ec2.Options)) (*ec2.DescribeInstancesOutput, error) DescribeInstanceTypes(context.Context, *ec2.DescribeInstanceTypesInput, ...func(*ec2.Options)) (*ec2.DescribeInstanceTypesOutput, error) @@ -87,6 +89,7 @@ type EC2API interface { ModifyVpcAttribute(ctx context.Context, params *ec2.ModifyVpcAttributeInput, optFns ...func(*ec2.Options)) (*ec2.ModifyVpcAttributeOutput, error) ModifyVpcEndpoint(ctx context.Context, params *ec2.ModifyVpcEndpointInput, optFns ...func(*ec2.Options)) (*ec2.ModifyVpcEndpointOutput, error) ReleaseAddress(ctx context.Context, params *ec2.ReleaseAddressInput, optFns ...func(*ec2.Options)) (*ec2.ReleaseAddressOutput, error) + ReleaseHosts(ctx context.Context, params *ec2.ReleaseHostsInput, optFns ...func(*ec2.Options)) (*ec2.ReleaseHostsOutput, error) ReplaceRoute(ctx context.Context, params *ec2.ReplaceRouteInput, optFns 
...func(*ec2.Options)) (*ec2.ReplaceRouteOutput, error) RevokeSecurityGroupEgress(ctx context.Context, params *ec2.RevokeSecurityGroupEgressInput, optFns ...func(*ec2.Options)) (*ec2.RevokeSecurityGroupEgressOutput, error) RevokeSecurityGroupIngress(ctx context.Context, params *ec2.RevokeSecurityGroupIngressInput, optFns ...func(*ec2.Options)) (*ec2.RevokeSecurityGroupIngressOutput, error) diff --git a/pkg/cloud/services/ec2/ami.go b/pkg/cloud/services/ec2/ami.go index 769280cd8e..aa657313d8 100644 --- a/pkg/cloud/services/ec2/ami.go +++ b/pkg/cloud/services/ec2/ami.go @@ -213,10 +213,10 @@ func DefaultAMILookup(ec2Client common.EC2API, ownerID, baseOS, kubernetesVersio out, err := ec2Client.DescribeImages(context.TODO(), describeImageInput) if err != nil { - return nil, errors.Wrapf(err, "failed to find ami: %q", amiName) + return nil, errors.Wrapf(err, "failed to find AMI with name %q, architecture %q, ownerID %q", amiName, architecture, ownerID) } if out == nil || len(out.Images) == 0 { - return nil, errors.Errorf("found no AMIs with the name: %q", amiName) + return nil, errors.Errorf("found no AMIs with name %q, architecture %q, ownerID %q", amiName, architecture, ownerID) } latestImage, err := GetLatestImage(out.Images) if err != nil { diff --git a/pkg/cloud/services/ec2/bastion.go b/pkg/cloud/services/ec2/bastion.go index 8d31916530..e66bcadaad 100644 --- a/pkg/cloud/services/ec2/bastion.go +++ b/pkg/cloud/services/ec2/bastion.go @@ -32,8 +32,8 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/filter" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/userdata" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/record" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - "sigs.k8s.io/cluster-api/util/conditions" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + v1beta1conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" ) const ( @@ -72,8 +72,8 @@ func (s *Service) ReconcileBastion() error { // Describe bastion instance, if any. instance, err := s.describeBastionInstance() if awserrors.IsNotFound(err) { //nolint:nestif - if !conditions.Has(s.scope.InfraCluster(), infrav1.BastionHostReadyCondition) { - conditions.MarkFalse(s.scope.InfraCluster(), infrav1.BastionHostReadyCondition, infrav1.BastionCreationStartedReason, clusterv1.ConditionSeverityInfo, "") + if !v1beta1conditions.Has(s.scope.InfraCluster(), infrav1.BastionHostReadyCondition) { + v1beta1conditions.MarkFalse(s.scope.InfraCluster(), infrav1.BastionHostReadyCondition, infrav1.BastionCreationStartedReason, clusterv1beta1.ConditionSeverityInfo, "") if err := s.scope.PatchObject(); err != nil { return errors.Wrap(err, "failed to patch conditions") } @@ -98,7 +98,7 @@ func (s *Service) ReconcileBastion() error { // TODO(vincepri): check for possible changes between the default spec and the instance. 
s.scope.SetBastionInstance(instance.DeepCopy()) - conditions.MarkTrue(s.scope.InfraCluster(), infrav1.BastionHostReadyCondition) + v1beta1conditions.MarkTrue(s.scope.InfraCluster(), infrav1.BastionHostReadyCondition) s.scope.Debug("Reconcile bastion completed successfully") return nil @@ -115,20 +115,20 @@ func (s *Service) DeleteBastion() error { return errors.Wrap(err, "unable to describe bastion instance") } - conditions.MarkFalse(s.scope.InfraCluster(), infrav1.BastionHostReadyCondition, clusterv1.DeletingReason, clusterv1.ConditionSeverityInfo, "") + v1beta1conditions.MarkFalse(s.scope.InfraCluster(), infrav1.BastionHostReadyCondition, clusterv1beta1.DeletingReason, clusterv1beta1.ConditionSeverityInfo, "") if err := s.scope.PatchObject(); err != nil { return err } if err := s.TerminateInstanceAndWait(instance.ID); err != nil { - conditions.MarkFalse(s.scope.InfraCluster(), infrav1.BastionHostReadyCondition, "DeletingFailed", clusterv1.ConditionSeverityWarning, "%s", err.Error()) + v1beta1conditions.MarkFalse(s.scope.InfraCluster(), infrav1.BastionHostReadyCondition, "DeletingFailed", clusterv1beta1.ConditionSeverityWarning, "%s", err.Error()) record.Warnf(s.scope.InfraCluster(), "FailedTerminateBastion", "Failed to terminate bastion instance %q: %v", instance.ID, err) return errors.Wrap(err, "unable to delete bastion instance") } s.scope.SetBastionInstance(nil) - conditions.MarkFalse(s.scope.InfraCluster(), infrav1.BastionHostReadyCondition, clusterv1.DeletedReason, clusterv1.ConditionSeverityInfo, "") + v1beta1conditions.MarkFalse(s.scope.InfraCluster(), infrav1.BastionHostReadyCondition, clusterv1beta1.DeletedReason, clusterv1beta1.ConditionSeverityInfo, "") record.Eventf(s.scope.InfraCluster(), "SuccessfulTerminateBastion", "Terminated bastion instance %q", instance.ID) s.scope.Info("Deleted bastion host", "id", instance.ID) diff --git a/pkg/cloud/services/ec2/bastion_test.go b/pkg/cloud/services/ec2/bastion_test.go index e48a540935..aeb6ad5518 100644 --- a/pkg/cloud/services/ec2/bastion_test.go +++ b/pkg/cloud/services/ec2/bastion_test.go @@ -34,7 +34,8 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/filter" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope" "sigs.k8s.io/cluster-api-provider-aws/v2/test/mocks" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" ) func TestServiceDeleteBastion(t *testing.T) { @@ -419,7 +420,7 @@ func TestServiceReconcileBastion(t *testing.T) { SubnetID: "subnet-1", ImageID: "ubuntu-ami-id-latest", IAMProfile: "foo", - Addresses: []clusterv1.MachineAddress{}, + Addresses: []clusterv1beta1.MachineAddress{}, AvailabilityZone: "us-east-1", VolumeIDs: []string{"volume-1"}, }, @@ -450,10 +451,10 @@ func TestServiceReconcileBastion(t *testing.T) { ID: "vpcID", }, Subnets: infrav1.Subnets{ - { + infrav1.SubnetSpec{ ID: "subnet-1", }, - { + infrav1.SubnetSpec{ ID: "subnet-2", IsPublic: true, }, @@ -651,7 +652,7 @@ func TestServiceReconcileBastionUSGOV(t *testing.T) { SubnetID: "subnet-1", ImageID: "ubuntu-ami-id-latest", IAMProfile: "foo", - Addresses: []clusterv1.MachineAddress{}, + Addresses: []clusterv1beta1.MachineAddress{}, AvailabilityZone: "us-gov-east-1", VolumeIDs: []string{"volume-1"}, }, @@ -682,10 +683,10 @@ func TestServiceReconcileBastionUSGOV(t *testing.T) { ID: "vpcID", }, Subnets: infrav1.Subnets{ - { + infrav1.SubnetSpec{ ID: "subnet-1", }, - { + infrav1.SubnetSpec{ ID: "subnet-2", IsPublic: true, }, diff 
--git a/pkg/cloud/services/ec2/dedicatedhosts.go b/pkg/cloud/services/ec2/dedicatedhosts.go new file mode 100644 index 0000000000..676fe6451f --- /dev/null +++ b/pkg/cloud/services/ec2/dedicatedhosts.go @@ -0,0 +1,258 @@ +/* +Copyright 2025 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package ec2 + +import ( + "context" + "fmt" + "math" + "strings" + "time" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/aws/retry" + "github.com/aws/aws-sdk-go-v2/service/ec2" + "github.com/aws/aws-sdk-go-v2/service/ec2/types" + "github.com/pkg/errors" + + infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" + "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/awserrors" + "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope" + "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/record" +) + +// AllocateDedicatedHost allocates a single dedicated host based on the specification. +// This function always allocates exactly one dedicated host per call. +// The dedicated host will inherit additional tags defined in the AWSMachineTemplate. +func (s *Service) AllocateDedicatedHost(ctx context.Context, spec *infrav1.DynamicHostAllocationSpec, instanceType, availabilityZone string, scope *scope.MachineScope) (string, error) { + s.scope.Debug("Allocating single dedicated host", "instanceType", instanceType, "availabilityZone", availabilityZone) + input := &ec2.AllocateHostsInput{ + InstanceType: aws.String(instanceType), + AvailabilityZone: aws.String(availabilityZone), + Quantity: aws.Int32(1), + } + + // Build tags for the dedicated host + // Only include additionalTags from the machine and dedicated host specific tags + additionalTags := scope.AdditionalTags() + + // Start with additional tags from the machine (AWSMachineTemplate additionalTags) + dedicatedHostTags := make(map[string]string) + for key, value := range additionalTags { + dedicatedHostTags[key] = value + } + + // Merge in dedicated host specific tags from the spec + // Dedicated host specific tags take precedence over additional tags + for key, value := range spec.Tags { + dedicatedHostTags[key] = value + } + + // Add tags to the allocation request + if len(dedicatedHostTags) > 0 { + var tagSpecs []types.TagSpecification + var tags []types.Tag + for key, value := range dedicatedHostTags { + tags = append(tags, types.Tag{ + Key: aws.String(key), + Value: aws.String(value), + }) + } + tagSpecs = append(tagSpecs, types.TagSpecification{ + ResourceType: types.ResourceTypeDedicatedHost, + Tags: tags, + }) + input.TagSpecifications = tagSpecs + } + + s.scope.Info("Allocating dedicated host", "input", input, "machine", scope.Name()) + output, err := s.EC2Client.AllocateHosts(ctx, input) + if err != nil { + return "", errors.Wrap(err, fmt.Sprintf("failed to allocate dedicated host: %+v", input)) + } + + // Ensure we got exactly one host as expected + if len(output.HostIds) != 1 { + return "", errors.Errorf("expected one dedicated host, but got %d hosts", len(output.HostIds)) + } + + hostID := output.HostIds[0] + 
s.scope.Info("Successfully allocated single dedicated host", + "hostID", hostID, + "availabilityZone", availabilityZone, + "machine", scope.Name(), + "instanceType", instanceType) + record.Eventf(s.scope.InfraCluster(), "SuccessfulAllocateDedicatedHost", "Allocated dedicated host %s in %s for machine %s", hostID, availabilityZone, scope.Name()) + + return hostID, nil +} + +// ReleaseDedicatedHost releases a dedicated host with enhanced retry logic. +// This function uses AWS SDK v2's built-in retry mechanisms optimized for +// dedicated host operations, which are expensive resources requiring robust retry handling. +func (s *Service) ReleaseDedicatedHost(ctx context.Context, hostID string) error { + s.scope.Debug("Releasing dedicated host", "hostID", hostID) + + input := &ec2.ReleaseHostsInput{ + HostIds: []string{hostID}, + } + + // Create a client with enhanced retry configuration for dedicated host operations + clientWithRetry := s.createClientWithDedicatedHostRetryConfig() + + output, err := clientWithRetry.ReleaseHosts(ctx, input) + if err != nil { + errorCode := s.getErrorCode(err) + s.scope.Error(err, "Failed to release dedicated host", + "hostID", hostID, + "errorCode", errorCode, + "result", s.getReleaseHostsOutput(output)) + record.Warnf(s.scope.InfraCluster(), "FailedReleaseDedicatedHost", "Failed to release dedicated host %s: %v", hostID, err) + return errors.Wrap(err, "failed to release dedicated host") + } + + s.scope.Info("Successfully released dedicated host", + "hostID", hostID, + "result", s.getReleaseHostsOutput(output)) + record.Eventf(s.scope.InfraCluster(), "SuccessfulReleaseDedicatedHost", "Released dedicated host %s", hostID) + return nil +} + +// createClientWithDedicatedHostRetryConfig creates an EC2 client with enhanced retry configuration +// specifically optimized for dedicated host operations using RetryerV2 interface. +func (s *Service) createClientWithDedicatedHostRetryConfig() *ec2.Client { + // Get the base configuration from the service's session + cfg := s.scope.Session() + + // Create a custom RetryerV2 for dedicated host operations + // Using AWS SDK's built-in adaptive retry mode which implements RetryerV2 + dedicatedHostRetryer := retry.NewAdaptiveMode(func(o *retry.AdaptiveModeOptions) { + // More aggressive retry configuration for expensive dedicated host operations + o.StandardOptions = append(o.StandardOptions, func(so *retry.StandardOptions) { + so.MaxAttempts = 5 // Maximum retry attempts + so.MaxBackoff = 30 * time.Second // Maximum backoff time + so.Backoff = retry.NewExponentialJitterBackoff(time.Second) // 1 second initial delay with built-in jitter + }) + }) + + // Override the retry configuration in the config using RetryerV2 + // provides better context handling and granular control over retry attempts + cfg.Retryer = func() aws.Retryer { + return dedicatedHostRetryer // AdaptiveMode implements aws.RetryerV2 + } + + // Create a new client with the enhanced RetryerV2 configuration + // The RetryerV2 interface provides: + // - GetAttemptToken(context.Context) for context-aware retry decisions + // - Better integration with AWS SDK v2's context handling + // - More granular control over retry behavior + return ec2.NewFromConfig(cfg) +} + +// getErrorCode extracts the error code from an AWS error. 
+func (s *Service) getErrorCode(err error) string {
+	if smithyErr := awserrors.ParseSmithyError(err); smithyErr != nil {
+		return smithyErr.ErrorCode()
+	}
+	if code, ok := awserrors.Code(err); ok {
+		return code
+	}
+	return "Unknown"
+}
+
+// DescribeDedicatedHost describes a specific dedicated host.
+func (s *Service) DescribeDedicatedHost(ctx context.Context, hostID string) (*infrav1.DedicatedHostInfo, error) {
+	input := &ec2.DescribeHostsInput{
+		HostIds: []string{hostID},
+	}
+
+	output, err := s.EC2Client.DescribeHosts(ctx, input)
+	if err != nil {
+		return nil, errors.Wrap(err, "failed to describe dedicated host")
+	}
+
+	if len(output.Hosts) == 0 {
+		return nil, errors.Errorf("dedicated host %s not found", hostID)
+	}
+
+	host := output.Hosts[0]
+	hostInfo := s.convertToHostInfo(host)
+
+	return hostInfo, nil
+}
+
+// convertToHostInfo converts an AWS Host to the DedicatedHostInfo struct.
+func (s *Service) convertToHostInfo(host types.Host) *infrav1.DedicatedHostInfo {
+	hostInfo := &infrav1.DedicatedHostInfo{
+		HostID:           aws.ToString(host.HostId),
+		AvailabilityZone: aws.ToString(host.AvailabilityZone),
+		State:            string(host.State),
+		Tags:             make(map[string]string),
+	}
+
+	// Parse properties from HostProperties
+	if host.HostProperties != nil {
+		if host.HostProperties.InstanceFamily != nil {
+			hostInfo.InstanceFamily = *host.HostProperties.InstanceFamily
+		}
+		if host.HostProperties.InstanceType != nil {
+			hostInfo.InstanceType = *host.HostProperties.InstanceType
+		}
+		if host.HostProperties.TotalVCpus != nil {
+			hostInfo.TotalCapacity = *host.HostProperties.TotalVCpus
+		}
+	}
+
+	// Calculate available capacity from instances
+	instanceCount := len(host.Instances)
+	if instanceCount > math.MaxInt32 {
+		instanceCount = math.MaxInt32
+	}
+	// bounds check ensures instanceCount <= math.MaxInt32, preventing integer overflow
+	usedCapacity := int32(instanceCount)
+	hostInfo.AvailableCapacity = hostInfo.TotalCapacity - usedCapacity
+
+	// Convert tags
+	for _, tag := range host.Tags {
+		if tag.Key != nil && tag.Value != nil {
+			hostInfo.Tags[*tag.Key] = *tag.Value
+		}
+	}
+
+	return hostInfo
+}
+
+// getReleaseHostsOutput renders a ReleaseHosts result as a human-readable string for logging.
+// It is safe to call with a nil output, e.g. when ReleaseHosts itself returned an error.
+func (s *Service) getReleaseHostsOutput(output *ec2.ReleaseHostsOutput) string {
+	if output == nil {
+		return ""
+	}
+
+	var errs []string
+
+	if output.Successful != nil {
+		return strings.Join(output.Successful, ", ")
+	} else if output.Unsuccessful != nil {
+		for _, err := range output.Unsuccessful {
+			var errResource string
+			if err.Error != nil {
+				errResource = fmt.Sprintf("Resource ID: %s, Error code: %s, Error message: %s", aws.ToString(err.ResourceId), aws.ToString(err.Error.Code), aws.ToString(err.Error.Message))
+			} else {
+				errResource = fmt.Sprintf("Resource ID: %s", aws.ToString(err.ResourceId))
+			}
+			errs = append(errs, errResource)
+		}
+		return strings.Join(errs, ", ")
+	}
+
+	return ""
+}
diff --git a/pkg/cloud/services/ec2/dedicatedhosts_test.go b/pkg/cloud/services/ec2/dedicatedhosts_test.go
new file mode 100644
index 0000000000..2391fbf2e3
--- /dev/null
+++ b/pkg/cloud/services/ec2/dedicatedhosts_test.go
@@ -0,0 +1,344 @@
+/*
+Copyright 2025 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package ec2 + +import ( + "context" + "testing" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/ec2" + "github.com/aws/aws-sdk-go-v2/service/ec2/types" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + + infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" + "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope" + "sigs.k8s.io/cluster-api-provider-aws/v2/test/mocks" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" +) + +func createTestClusterScope(t *testing.T) *scope.ClusterScope { + t.Helper() + scheme := runtime.NewScheme() + _ = infrav1.AddToScheme(scheme) + client := fake.NewClientBuilder().WithScheme(scheme).Build() + + scope, err := scope.NewClusterScope(scope.ClusterScopeParams{ + Client: client, + Cluster: &clusterv1.Cluster{}, + AWSCluster: &infrav1.AWSCluster{ + ObjectMeta: metav1.ObjectMeta{Name: "test"}, + Spec: infrav1.AWSClusterSpec{ + NetworkSpec: infrav1.NetworkSpec{ + VPC: infrav1.VPCSpec{ + ID: "test-vpc", + }, + }, + }, + }, + }) + if err != nil { + t.Fatalf("Failed to create test context: %v", err) + } + return scope +} + +func createTestMachineScope(t *testing.T, clusterScope *scope.ClusterScope) *scope.MachineScope { + t.Helper() + scheme := runtime.NewScheme() + _ = infrav1.AddToScheme(scheme) + _ = clusterv1.AddToScheme(scheme) + client := fake.NewClientBuilder().WithScheme(scheme).Build() + + machine := &clusterv1.Machine{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-machine", + Namespace: "default", + }, + } + + awsMachine := &infrav1.AWSMachine{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-aws-machine", + Namespace: "default", + }, + Spec: infrav1.AWSMachineSpec{ + InstanceType: "m5.large", + AdditionalTags: infrav1.Tags{ + "Environment": "test", + "Owner": "test-user", + }, + }, + } + + machineScope, err := scope.NewMachineScope(scope.MachineScopeParams{ + Client: client, + Cluster: clusterScope.Cluster, + Machine: machine, + AWSMachine: awsMachine, + InfraCluster: clusterScope, + }) + if err != nil { + t.Fatalf("Failed to create test machine scope: %v", err) + } + return machineScope +} + +func TestAllocateDedicatedHost(t *testing.T) { + mockCtrl := gomock.NewController(t) + defer mockCtrl.Finish() + + tests := []struct { + name string + dynamicAllocationSpec *infrav1.DynamicHostAllocationSpec + availabilityZone string + expectError bool + instanceType string + setupMocks func(m *mocks.MockEC2API) + }{ + { + name: "should allocate exactly one dedicated host", + dynamicAllocationSpec: &infrav1.DynamicHostAllocationSpec{ + Tags: map[string]string{ + "Environment": "production", // This should override the machine's "test" value + "Purpose": "dedicated", // This should be added from dedicated host specific tags + }, + }, + availabilityZone: "us-west-2a", + instanceType: "m5.large", + expectError: false, + setupMocks: func(m *mocks.MockEC2API) { + // Mock AllocateHosts to return exactly one host + m.EXPECT().AllocateHosts(gomock.Any(), gomock.Any(), gomock.Any()).DoAndReturn(func(ctx context.Context, input *ec2.AllocateHostsInput, optFns ...func(*ec2.Options)) (*ec2.AllocateHostsOutput, error) { + // Verify that quantity is set to 1 + assert.Equal(t, int32(1), *input.Quantity) + + // Verify that tags are being passed + assert.NotNil(t, 
input.TagSpecifications) + assert.Len(t, input.TagSpecifications, 1) + assert.Equal(t, types.ResourceTypeDedicatedHost, input.TagSpecifications[0].ResourceType) + + // Verify that only the expected tags are present (no standard cluster/machine tags) + expectedTags := map[string]string{ + "Environment": "production", // from dedicated host specific tags (overrides machine's "test") + "Owner": "test-user", // from machine AdditionalTags + "Purpose": "dedicated", // from dedicated host specific tags + } + + // Verify we have exactly the expected number of tags + assert.Equal(t, len(expectedTags), len(input.TagSpecifications[0].Tags), "Should have exactly the expected number of tags") + + // Verify each expected tag is present with correct value + for _, tag := range input.TagSpecifications[0].Tags { + key := aws.ToString(tag.Key) + value := aws.ToString(tag.Value) + expectedValue, exists := expectedTags[key] + assert.True(t, exists, "Unexpected tag found: %s", key) + assert.Equal(t, expectedValue, value, "Tag %s should have value %s", key, expectedValue) + } + + return &ec2.AllocateHostsOutput{ + HostIds: []string{"h-1234567890abcdef0"}, + }, nil + }) + }, + }, + { + name: "should fail if AWS returns multiple hosts", + dynamicAllocationSpec: &infrav1.DynamicHostAllocationSpec{}, + availabilityZone: "us-west-2a", + instanceType: "m5.large", + expectError: true, + setupMocks: func(m *mocks.MockEC2API) { + // Mock AllocateHosts to return multiple hosts (should never happen with quantity=1, but test the validation) + m.EXPECT().AllocateHosts(gomock.Any(), gomock.Any(), gomock.Any()).Return(&ec2.AllocateHostsOutput{ + HostIds: []string{"h-1234567890abcdef0", "h-0987654321fedcba0"}, + }, nil) + }, + }, + { + name: "should fail if AWS returns no hosts", + dynamicAllocationSpec: &infrav1.DynamicHostAllocationSpec{}, + availabilityZone: "us-west-2a", + instanceType: "m5.large", + expectError: true, + setupMocks: func(m *mocks.MockEC2API) { + // Mock AllocateHosts to return no hosts + m.EXPECT().AllocateHosts(gomock.Any(), gomock.Any(), gomock.Any()).Return(&ec2.AllocateHostsOutput{ + HostIds: []string{}, + }, nil) + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ec2Mock := mocks.NewMockEC2API(mockCtrl) + tt.setupMocks(ec2Mock) + + clusterScope := createTestClusterScope(t) + machineScope := createTestMachineScope(t, clusterScope) + s := NewService(clusterScope) + s.EC2Client = ec2Mock + + hostID, err := s.AllocateDedicatedHost(context.TODO(), tt.dynamicAllocationSpec, tt.instanceType, tt.availabilityZone, machineScope) + + if tt.expectError { + assert.Error(t, err) + assert.Empty(t, hostID) + } else { + assert.NoError(t, err) + assert.NotEmpty(t, hostID) + } + }) + } +} + +func TestDescribeDedicatedHost(t *testing.T) { + mockCtrl := gomock.NewController(t) + defer mockCtrl.Finish() + + hostID := "h-1234567890abcdef0" + + host := types.Host{ + HostId: aws.String(hostID), + AvailabilityZone: aws.String("us-west-2a"), + State: types.AllocationStateAvailable, + HostProperties: &types.HostProperties{ + InstanceFamily: aws.String("m5"), + InstanceType: aws.String("m5.large"), + TotalVCpus: aws.Int32(2), + }, + Instances: []types.HostInstance{}, + Tags: []types.Tag{ + { + Key: aws.String("Environment"), + Value: aws.String("test"), + }, + }, + } + + ec2Mock := mocks.NewMockEC2API(mockCtrl) + ec2Mock.EXPECT().DescribeHosts(gomock.Any(), gomock.Any()).Return(&ec2.DescribeHostsOutput{ + Hosts: []types.Host{host}, + }, nil) + + scope := createTestClusterScope(t) + s := 
NewService(scope)
+	s.EC2Client = ec2Mock
+
+	hostInfo, err := s.DescribeDedicatedHost(context.TODO(), hostID)
+	assert.NoError(t, err)
+	assert.NotNil(t, hostInfo)
+	assert.Equal(t, hostID, hostInfo.HostID)
+	assert.Equal(t, "m5", hostInfo.InstanceFamily)
+	assert.Equal(t, "m5.large", hostInfo.InstanceType)
+	assert.Equal(t, "us-west-2a", hostInfo.AvailabilityZone)
+	assert.Equal(t, "available", hostInfo.State)
+	assert.Equal(t, int32(2), hostInfo.TotalCapacity)
+	assert.Equal(t, int32(2), hostInfo.AvailableCapacity) // No instances running
+	assert.Equal(t, "test", hostInfo.Tags["Environment"])
+}
+
+func TestAllocateDedicatedHostMultipleMachines(t *testing.T) {
+	// This test verifies that multiple machines each get their own dedicated host.
+	// This is the intended behavior for dedicated hosts - each machine gets complete isolation.
+	mockCtrl := gomock.NewController(t)
+	defer mockCtrl.Finish()
+
+	// Create two machine scopes that would both try to allocate hosts.
+	clusterScope := createTestClusterScope(t)
+	machineScope1 := createTestMachineScope(t, clusterScope)
+	machineScope2 := createTestMachineScope(t, clusterScope)
+
+	// Give them different names to simulate different machines.
+	machineScope1.AWSMachine.Name = "test-machine-1"
+	machineScope2.AWSMachine.Name = "test-machine-2"
+
+	ec2Mock := mocks.NewMockEC2API(mockCtrl)
+
+	// Both machines will call AllocateHosts and get separate hosts.
+	// This is the intended behavior - each machine gets its own dedicated host for isolation.
+	ec2Mock.EXPECT().AllocateHosts(gomock.Any(), gomock.Any(), gomock.Any()).DoAndReturn(func(ctx context.Context, input *ec2.AllocateHostsInput, optFns ...func(*ec2.Options)) (*ec2.AllocateHostsOutput, error) {
+		// Verify that quantity is set to 1.
+		assert.Equal(t, int32(1), *input.Quantity)
+		return &ec2.AllocateHostsOutput{
+			HostIds: []string{"h-1234567890abcdef0"},
+		}, nil
+	}).Times(2) // Expect two calls for two machines.
+
+	s := NewService(clusterScope)
+	s.EC2Client = ec2Mock
+
+	spec := &infrav1.DynamicHostAllocationSpec{
+		Tags: map[string]string{
+			"Environment": "test",
+		},
+	}
+
+	// Allocate a host for each machine; in a real reconciliation these calls could run concurrently.
+	hostID1, err1 := s.AllocateDedicatedHost(context.TODO(), spec, "m5.large", "us-west-2a", machineScope1)
+	hostID2, err2 := s.AllocateDedicatedHost(context.TODO(), spec, "m5.large", "us-west-2a", machineScope2)
+
+	// Both allocations should succeed, each machine making its own AllocateHosts call.
+	assert.NoError(t, err1)
+	assert.NoError(t, err2)
+	assert.NotEmpty(t, hostID1)
+	assert.NotEmpty(t, hostID2)
+	assert.Equal(t, "h-1234567890abcdef0", hostID1)
+	assert.Equal(t, "h-1234567890abcdef0", hostID2) // Same host ID only because the mock returns a fixed value; real calls would return distinct hosts.
+}
+
+func TestConvertToHostInfo(t *testing.T) {
+	hostID := "h-1234567890abcdef0"
+
+	host := types.Host{
+		HostId:           aws.String(hostID),
+		AvailabilityZone: aws.String("us-west-2a"),
+		State:            types.AllocationStateAvailable,
+		HostProperties: &types.HostProperties{
+			InstanceFamily: aws.String("m5"),
+			InstanceType:   aws.String("m5.large"),
+			TotalVCpus:     aws.Int32(4),
+		},
+		Instances: []types.HostInstance{
+			{InstanceId: aws.String("i-1234567890abcdef0")},
+		},
+		Tags: []types.Tag{
+			{
+				Key:   aws.String("Environment"),
+				Value: aws.String("test"),
+			},
+		},
+	}
+
+	s := &Service{}
+	hostInfo := s.convertToHostInfo(host)
+
+	assert.Equal(t, hostID, hostInfo.HostID)
+	assert.Equal(t, "m5", hostInfo.InstanceFamily)
+	assert.Equal(t, "m5.large", 
hostInfo.InstanceType) + assert.Equal(t, "us-west-2a", hostInfo.AvailabilityZone) + assert.Equal(t, "available", hostInfo.State) + assert.Equal(t, int32(4), hostInfo.TotalCapacity) + assert.Equal(t, int32(3), hostInfo.AvailableCapacity) // 1 instance running, 3 available + assert.Equal(t, "test", hostInfo.Tags["Environment"]) +} diff --git a/pkg/cloud/services/ec2/helper_test.go b/pkg/cloud/services/ec2/helper_test.go index bd40c2b7bb..889a82e23f 100644 --- a/pkg/cloud/services/ec2/helper_test.go +++ b/pkg/cloud/services/ec2/helper_test.go @@ -24,15 +24,13 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" - "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2" expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - "sigs.k8s.io/cluster-api/exp/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" ) func setupClusterScope(cl client.Client) (*scope.ClusterScope, error) { @@ -164,8 +162,8 @@ func newAWSManagedControlPlane() *ekscontrolplanev1.AWSManagedControlPlane { } } -func newMachinePool() *v1beta1.MachinePool { - return &v1beta1.MachinePool{ +func newMachinePool() *clusterv1.MachinePool { + return &clusterv1.MachinePool{ TypeMeta: metav1.TypeMeta{ Kind: "MachinePool", APIVersion: "v1", @@ -173,10 +171,10 @@ func newMachinePool() *v1beta1.MachinePool { ObjectMeta: metav1.ObjectMeta{ Name: "mp", }, - Spec: v1beta1.MachinePoolSpec{ + Spec: clusterv1.MachinePoolSpec{ Template: clusterv1.MachineTemplateSpec{ Spec: clusterv1.MachineSpec{ - Version: ptr.To[string]("v1.23.3"), + Version: "v1.23.3", }, }, }, @@ -206,7 +204,7 @@ func setupScheme() (*runtime.Scheme, error) { if err := ekscontrolplanev1.AddToScheme(scheme); err != nil { return nil, err } - if err := v1beta1.AddToScheme(scheme); err != nil { + if err := clusterv1.AddToScheme(scheme); err != nil { return nil, err } return scheme, nil diff --git a/pkg/cloud/services/ec2/instances.go b/pkg/cloud/services/ec2/instances.go index 6e5813c74a..7a311a9326 100644 --- a/pkg/cloud/services/ec2/instances.go +++ b/pkg/cloud/services/ec2/instances.go @@ -39,7 +39,7 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/userdata" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/record" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/utils" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) // GetRunningInstanceByTags returns the existing instance or nothing if it doesn't exist. 
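// Editor's aside (illustrative sketch, not part of the patch): the dedicated-host
// tag precedence asserted in TestAllocateDedicatedHost above is "machine
// AdditionalTags first, then DynamicHostAllocationSpec.Tags override on key
// collision". mergeHostTags below is a hypothetical helper distilled from the
// test expectations, not a function introduced by this change.
package main

import "fmt"

func mergeHostTags(machineTags, hostSpecTags map[string]string) map[string]string {
	merged := make(map[string]string, len(machineTags)+len(hostSpecTags))
	for k, v := range machineTags {
		merged[k] = v // machine-level AdditionalTags applied first
	}
	for k, v := range hostSpecTags {
		merged[k] = v // host-spec tags win on collision
	}
	return merged
}

func main() {
	machineTags := map[string]string{"Environment": "test", "Owner": "test-user"}
	hostSpecTags := map[string]string{"Environment": "production", "Purpose": "dedicated"}
	// Prints map[Environment:production Owner:test-user Purpose:dedicated],
	// matching expectedTags in the test above.
	fmt.Println(mergeHostTags(machineTags, hostSpecTags))
}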
@@ -144,7 +144,7 @@ func (s *Service) CreateInstance(ctx context.Context, scope *scope.MachineScope, if scope.AWSMachine.Spec.AMI.ID != nil { //nolint:nestif input.ImageID = *scope.AWSMachine.Spec.AMI.ID } else { - if scope.Machine.Spec.Version == nil { + if scope.Machine.Spec.Version == "" { err := errors.New("Either AWSMachine's spec.ami.id or Machine's spec.version must be defined") scope.SetFailureReason("CreateError") scope.SetFailureMessage(err) @@ -167,12 +167,12 @@ func (s *Service) CreateInstance(ctx context.Context, scope *scope.MachineScope, } if scope.IsEKSManaged() && imageLookupFormat == "" && imageLookupOrg == "" && imageLookupBaseOS == "" { - input.ImageID, err = s.eksAMILookup(ctx, *scope.Machine.Spec.Version, imageArchitecture, scope.AWSMachine.Spec.AMI.EKSOptimizedLookupType) + input.ImageID, err = s.eksAMILookup(ctx, scope.Machine.Spec.Version, imageArchitecture, scope.AWSMachine.Spec.AMI.EKSOptimizedLookupType) if err != nil { return nil, err } } else { - input.ImageID, err = s.defaultAMIIDLookup(imageLookupFormat, imageLookupOrg, imageLookupBaseOS, imageArchitecture, *scope.Machine.Spec.Version) + input.ImageID, err = s.defaultAMIIDLookup(imageLookupFormat, imageLookupOrg, imageLookupBaseOS, imageArchitecture, scope.Machine.Spec.Version) if err != nil { return nil, err } @@ -258,9 +258,25 @@ func (s *Service) CreateInstance(ctx context.Context, scope *scope.MachineScope, input.MarketType = scope.AWSMachine.Spec.MarketType - input.HostID = scope.AWSMachine.Spec.HostID + // Handle dynamic host allocation if specified + if scope.AWSMachine.Spec.DynamicHostAllocation != nil { + hostID, err := s.ensureDedicatedHostAllocation(ctx, scope) + if err != nil { + return nil, errors.Wrap(err, "failed to allocate dedicated host") + } + input.HostID = aws.String(hostID) + input.HostAffinity = aws.String("host") - input.HostAffinity = scope.AWSMachine.Spec.HostAffinity + if scope.AWSMachine.Status.DedicatedHost == nil { + scope.AWSMachine.Status.DedicatedHost = &infrav1.DedicatedHostStatus{} + } + // Update machine status with allocated host ID + scope.AWSMachine.Status.DedicatedHost.ID = &hostID + } else { + // Use static host allocation if specified + input.HostID = scope.AWSMachine.Spec.HostID + input.HostAffinity = scope.AWSMachine.Spec.HostAffinity + } input.CapacityReservationPreference = scope.AWSMachine.Spec.CapacityReservationPreference @@ -355,11 +371,11 @@ func (s *Service) findSubnet(scope *scope.MachineScope) (string, error) { var filtered []types.Subnet var errMessage string for _, subnet := range subnets { - if failureDomain != nil && *subnet.AvailabilityZone != *failureDomain { + if failureDomain != "" && *subnet.AvailabilityZone != failureDomain { // we could have included the failure domain in the query criteria, but then we end up with EC2 error // messages that don't give a good hint about what is really wrong errMessage += fmt.Sprintf(" subnet %q availability zone %q does not match failure domain %q.", - *subnet.SubnetId, *subnet.AvailabilityZone, *failureDomain) + *subnet.SubnetId, *subnet.AvailabilityZone, failureDomain) continue } @@ -395,22 +411,22 @@ func (s *Service) findSubnet(scope *scope.MachineScope) (string, error) { return "", awserrors.NewFailedDependency(errMessage) } return *filtered[0].SubnetId, nil - case failureDomain != nil: + case failureDomain != "": if scope.AWSMachine.Spec.PublicIP != nil && *scope.AWSMachine.Spec.PublicIP { - subnets := s.scope.Subnets().FilterPublic().FilterNonCni().FilterByZone(*failureDomain) + subnets := 
s.scope.Subnets().FilterPublic().FilterNonCni().FilterByZone(failureDomain) if len(subnets) == 0 { errMessage := fmt.Sprintf("failed to run machine %q with public IP, no public subnets available in availability zone %q", - scope.Name(), *failureDomain) + scope.Name(), failureDomain) record.Warnf(scope.AWSMachine, "FailedCreate", errMessage) return "", awserrors.NewFailedDependency(errMessage) } return subnets[0].GetResourceID(), nil } - subnets := s.scope.Subnets().FilterPrivate().FilterNonCni().FilterByZone(*failureDomain) + subnets := s.scope.Subnets().FilterPrivate().FilterNonCni().FilterByZone(failureDomain) if len(subnets) == 0 { errMessage := fmt.Sprintf("failed to run machine %q, no subnets available in availability zone %q", - scope.Name(), *failureDomain) + scope.Name(), failureDomain) record.Warnf(scope.AWSMachine, "FailedCreate", errMessage) return "", awserrors.NewFailedDependency(errMessage) } @@ -578,7 +594,7 @@ func (s *Service) runInstance(role string, i *infrav1.Instance) (*infrav1.Instan for index, id := range i.NetworkInterfaces { netInterfaces = append(netInterfaces, types.InstanceNetworkInterfaceSpecification{ NetworkInterfaceId: aws.String(id), - DeviceIndex: aws.Int32(int32(index)), //nolint:gosec // disable G115 + DeviceIndex: aws.Int32(int32(index)), }) } netInterfaces[0].AssociatePublicIpAddress = i.PublicIPOnLaunch @@ -976,22 +992,22 @@ func (s *Service) SDKToInstance(v types.Instance) (*infrav1.Instance, error) { return i, nil } -func (s *Service) getInstanceAddresses(instance types.Instance) []clusterv1.MachineAddress { - addresses := []clusterv1.MachineAddress{} +func (s *Service) getInstanceAddresses(instance types.Instance) []clusterv1beta1.MachineAddress { + addresses := []clusterv1beta1.MachineAddress{} // Check if the DHCP Option Set has domain name set domainName := s.GetDHCPOptionSetDomainName(s.EC2Client, instance.VpcId) for _, eni := range instance.NetworkInterfaces { if addr := aws.ToString(eni.PrivateDnsName); addr != "" { - privateDNSAddress := clusterv1.MachineAddress{ - Type: clusterv1.MachineInternalDNS, + privateDNSAddress := clusterv1beta1.MachineAddress{ + Type: clusterv1beta1.MachineInternalDNS, Address: addr, } addresses = append(addresses, privateDNSAddress) if domainName != nil { // Add secondary private DNS Name with domain name set in DHCP Option Set - additionalPrivateDNSAddress := clusterv1.MachineAddress{ - Type: clusterv1.MachineInternalDNS, + additionalPrivateDNSAddress := clusterv1beta1.MachineAddress{ + Type: clusterv1beta1.MachineInternalDNS, Address: fmt.Sprintf("%s.%s", strings.Split(privateDNSAddress.Address, ".")[0], *domainName), } addresses = append(addresses, additionalPrivateDNSAddress) @@ -999,8 +1015,8 @@ func (s *Service) getInstanceAddresses(instance types.Instance) []clusterv1.Mach } if addr := aws.ToString(eni.PrivateIpAddress); addr != "" { - privateIPAddress := clusterv1.MachineAddress{ - Type: clusterv1.MachineInternalIP, + privateIPAddress := clusterv1beta1.MachineAddress{ + Type: clusterv1beta1.MachineInternalIP, Address: addr, } addresses = append(addresses, privateIPAddress) @@ -1009,16 +1025,16 @@ func (s *Service) getInstanceAddresses(instance types.Instance) []clusterv1.Mach // An elastic IP is attached if association is non nil pointer if eni.Association != nil { if addr := aws.ToString(eni.Association.PublicDnsName); addr != "" { - publicDNSAddress := clusterv1.MachineAddress{ - Type: clusterv1.MachineExternalDNS, + publicDNSAddress := clusterv1beta1.MachineAddress{ + Type: 
clusterv1beta1.MachineExternalDNS, Address: addr, } addresses = append(addresses, publicDNSAddress) } if addr := aws.ToString(eni.Association.PublicIp); addr != "" { - publicIPAddress := clusterv1.MachineAddress{ - Type: clusterv1.MachineExternalIP, + publicIPAddress := clusterv1beta1.MachineAddress{ + Type: clusterv1beta1.MachineExternalIP, Address: addr, } addresses = append(addresses, publicIPAddress) @@ -1279,6 +1295,61 @@ func getInstanceMetadataOptionsRequest(metadataOptions *infrav1.InstanceMetadata return request } +// ensureDedicatedHostAllocation ensures a dedicated host is allocated for the machine. +func (s *Service) ensureDedicatedHostAllocation(ctx context.Context, scope *scope.MachineScope) (string, error) { + spec := scope.AWSMachine.Spec.DynamicHostAllocation + if spec == nil { + return "", errors.New("dynamic host allocation spec is nil") + } + + // Check if a host is already allocated for this machine + // Each machine gets its own dedicated host for complete isolation and resource dedication + if scope.AWSMachine.Status.DedicatedHost != nil && scope.AWSMachine.Status.DedicatedHost.ID != nil { + existingHostID := aws.ToString(scope.AWSMachine.Status.DedicatedHost.ID) + s.scope.Info("Found existing allocated host for machine", "hostID", existingHostID, "machine", scope.Name()) + return existingHostID, nil + } + + // Determine the availability zone for the host + var availabilityZone *string + + // Get AZ from the machine's subnet + if scope.AWSMachine.Spec.Subnet != nil { + subnetID, err := s.findSubnet(scope) + if err != nil { + return "", errors.Wrap(err, "failed to find subnet for host allocation") + } + + // Get the full subnet object to extract availability zone + subnets, err := s.getFilteredSubnets(types.Filter{ + Name: aws.String("subnet-id"), + Values: []string{subnetID}, + }) + if err != nil { + return "", errors.Wrap(err, "failed to get subnet details for host allocation") + } + + if len(subnets) > 0 && subnets[0].AvailabilityZone != nil { + availabilityZone = subnets[0].AvailabilityZone + } + } + + instanceType := scope.AWSMachine.Spec.InstanceType + + if availabilityZone == nil { + return "", errors.New("availability zone could not be determined, please specify a subnet ID or subnet filters") + } + + // Allocate the dedicated host + hostID, err := s.AllocateDedicatedHost(ctx, spec, instanceType, *availabilityZone, scope) + if err != nil { + return "", errors.Wrap(err, "failed to allocate dedicated host") + } + + s.scope.Info("Successfully allocated dedicated host for machine", "hostID", hostID, "machine", scope.Name()) + return hostID, nil +} + func getPrivateDNSNameOptionsRequest(privateDNSName *infrav1.PrivateDNSName) *types.PrivateDnsNameOptionsRequest { if privateDNSName == nil { return nil diff --git a/pkg/cloud/services/ec2/instances_test.go b/pkg/cloud/services/ec2/instances_test.go index b6c7c69d23..86ceb01f3d 100644 --- a/pkg/cloud/services/ec2/instances_test.go +++ b/pkg/cloud/services/ec2/instances_test.go @@ -43,7 +43,8 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/userdata" "sigs.k8s.io/cluster-api-provider-aws/v2/test/mocks" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" ) func TestInstanceIfExists(t *testing.T) { @@ -435,7 +436,7 @@ func TestCreateInstance(t *testing.T) { Bootstrap: clusterv1.Bootstrap{ DataSecretName: 
ptr.To[string]("bootstrap-data"), }, - FailureDomain: aws.String("us-east-1c"), + FailureDomain: "us-east-1c", }, }, machineConfig: &infrav1.AWSMachineSpec{ @@ -569,7 +570,7 @@ func TestCreateInstance(t *testing.T) { Bootstrap: clusterv1.Bootstrap{ DataSecretName: aws.String("bootstrap-data"), }, - FailureDomain: aws.String("us-east-1c"), + FailureDomain: "us-east-1c", }, }, machineConfig: &infrav1.AWSMachineSpec{ @@ -657,7 +658,8 @@ func TestCreateInstance(t *testing.T) { Name: aws.String("availability-zone"), Values: []string{"us-east-1c"}, }, - }})).Return(&ec2.DescribeSubnetsOutput{ + }, + })).Return(&ec2.DescribeSubnetsOutput{ Subnets: []types.Subnet{ { VpcId: aws.String("vpc-incorrect-1"), @@ -829,7 +831,7 @@ func TestCreateInstance(t *testing.T) { Bootstrap: clusterv1.Bootstrap{ DataSecretName: aws.String("bootstrap-data"), }, - FailureDomain: aws.String("us-east-1c"), + FailureDomain: "us-east-1c", }, }, machineConfig: &infrav1.AWSMachineSpec{ @@ -925,7 +927,8 @@ func TestCreateInstance(t *testing.T) { Name: aws.String("availability-zone"), Values: []string{"us-east-1c"}, }, - }})).Return(&ec2.DescribeSubnetsOutput{ + }, + })).Return(&ec2.DescribeSubnetsOutput{ Subnets: []types.Subnet{ { VpcId: aws.String("vpc-bar"), @@ -1082,7 +1085,7 @@ func TestCreateInstance(t *testing.T) { Bootstrap: clusterv1.Bootstrap{ DataSecretName: ptr.To[string]("bootstrap-data"), }, - Version: ptr.To[string]("v1.16.1"), + Version: "v1.16.1", }, }, machineConfig: &infrav1.AWSMachineSpec{ @@ -1235,7 +1238,7 @@ func TestCreateInstance(t *testing.T) { Bootstrap: clusterv1.Bootstrap{ DataSecretName: ptr.To[string]("bootstrap-data"), }, - Version: ptr.To[string]("v1.16.1"), + Version: "v1.16.1", }, }, machineConfig: &infrav1.AWSMachineSpec{ @@ -1388,7 +1391,7 @@ func TestCreateInstance(t *testing.T) { Bootstrap: clusterv1.Bootstrap{ DataSecretName: ptr.To[string]("bootstrap-data"), }, - Version: ptr.To[string]("v1.16.1"), + Version: "v1.16.1", }, }, machineConfig: &infrav1.AWSMachineSpec{ @@ -1542,7 +1545,7 @@ func TestCreateInstance(t *testing.T) { Bootstrap: clusterv1.Bootstrap{ DataSecretName: ptr.To[string]("bootstrap-data"), }, - FailureDomain: aws.String("us-east-1b"), + FailureDomain: "us-east-1b", }, }, machineConfig: &infrav1.AWSMachineSpec{ @@ -1687,7 +1690,7 @@ func TestCreateInstance(t *testing.T) { VPC: infrav1.VPCSpec{ ID: "vpc-id", }, - Subnets: infrav1.Subnets{{ + Subnets: infrav1.Subnets{infrav1.SubnetSpec{ ID: "matching-subnet", }}, }, @@ -1813,7 +1816,7 @@ func TestCreateInstance(t *testing.T) { VPC: infrav1.VPCSpec{ ID: "vpc-id", }, - Subnets: infrav1.Subnets{{ + Subnets: infrav1.Subnets{infrav1.SubnetSpec{ ID: "subnet-1", }}, }, @@ -1905,7 +1908,7 @@ func TestCreateInstance(t *testing.T) { VPC: infrav1.VPCSpec{ ID: "vpc-id", }, - Subnets: infrav1.Subnets{{ + Subnets: infrav1.Subnets{infrav1.SubnetSpec{ ID: "subnet-1", }}, }, @@ -2012,7 +2015,7 @@ func TestCreateInstance(t *testing.T) { Bootstrap: clusterv1.Bootstrap{ DataSecretName: ptr.To[string]("bootstrap-data"), }, - FailureDomain: aws.String("us-east-1b"), + FailureDomain: "us-east-1b", }, }, machineConfig: &infrav1.AWSMachineSpec{ @@ -2031,7 +2034,7 @@ func TestCreateInstance(t *testing.T) { VPC: infrav1.VPCSpec{ ID: "vpc-id", }, - Subnets: infrav1.Subnets{{ + Subnets: infrav1.Subnets{infrav1.SubnetSpec{ ID: "subnet-1", AvailabilityZone: "us-west-1b", }}, @@ -2109,7 +2112,7 @@ func TestCreateInstance(t *testing.T) { Bootstrap: clusterv1.Bootstrap{ DataSecretName: ptr.To[string]("bootstrap-data"), }, - FailureDomain: 
aws.String("us-east-1b"), + FailureDomain: "us-east-1b", }, }, machineConfig: &infrav1.AWSMachineSpec{ @@ -2126,7 +2129,7 @@ func TestCreateInstance(t *testing.T) { VPC: infrav1.VPCSpec{ ID: "vpc-id", }, - Subnets: infrav1.Subnets{{ + Subnets: infrav1.Subnets{infrav1.SubnetSpec{ ID: "private-subnet-1", AvailabilityZone: "us-east-1b", IsPublic: false, @@ -2211,7 +2214,7 @@ func TestCreateInstance(t *testing.T) { VPC: infrav1.VPCSpec{ ID: "vpc-id", }, - Subnets: infrav1.Subnets{{ + Subnets: infrav1.Subnets{infrav1.SubnetSpec{ ID: "public-subnet-1", IsPublic: true, }}, @@ -2340,7 +2343,7 @@ func TestCreateInstance(t *testing.T) { VPC: infrav1.VPCSpec{ ID: "vpc-id", }, - Subnets: infrav1.Subnets{{ + Subnets: infrav1.Subnets{infrav1.SubnetSpec{ ID: "public-subnet-1", IsPublic: true, }}, @@ -2611,7 +2614,7 @@ func TestCreateInstance(t *testing.T) { VPC: infrav1.VPCSpec{ ID: "vpc-id", }, - Subnets: infrav1.Subnets{{ + Subnets: infrav1.Subnets{infrav1.SubnetSpec{ ID: "private-subnet-1", IsPublic: false, }}, @@ -4381,7 +4384,7 @@ func TestCreateInstance(t *testing.T) { Bootstrap: clusterv1.Bootstrap{ DataSecretName: ptr.To[string]("bootstrap-data"), }, - Version: ptr.To[string]("v1.16.1"), + Version: "v1.16.1", }, }, machineConfig: &infrav1.AWSMachineSpec{ @@ -4513,7 +4516,7 @@ func TestCreateInstance(t *testing.T) { Bootstrap: clusterv1.Bootstrap{ DataSecretName: ptr.To[string]("bootstrap-data"), }, - Version: ptr.To[string]("v1.16.1"), + Version: "v1.16.1", }, }, machineConfig: &infrav1.AWSMachineSpec{ @@ -4646,7 +4649,7 @@ func TestCreateInstance(t *testing.T) { Bootstrap: clusterv1.Bootstrap{ DataSecretName: ptr.To[string]("bootstrap-data"), }, - Version: ptr.To[string]("v1.16.1"), + Version: "v1.16.1", }, }, machineConfig: &infrav1.AWSMachineSpec{ @@ -4780,7 +4783,7 @@ func TestCreateInstance(t *testing.T) { Bootstrap: clusterv1.Bootstrap{ DataSecretName: ptr.To[string]("bootstrap-data"), }, - Version: ptr.To[string]("v1.16.1"), + Version: "v1.16.1", }, }, machineConfig: &infrav1.AWSMachineSpec{ @@ -4911,7 +4914,7 @@ func TestCreateInstance(t *testing.T) { Bootstrap: clusterv1.Bootstrap{ DataSecretName: ptr.To[string]("bootstrap-data"), }, - Version: ptr.To[string]("v1.16.1"), + Version: "v1.16.1", }, }, machineConfig: &infrav1.AWSMachineSpec{ @@ -5042,7 +5045,7 @@ func TestCreateInstance(t *testing.T) { Bootstrap: clusterv1.Bootstrap{ DataSecretName: ptr.To[string]("bootstrap-data"), }, - Version: ptr.To[string]("v1.16.1"), + Version: "v1.16.1", }, }, machineConfig: &infrav1.AWSMachineSpec{ @@ -5319,11 +5322,11 @@ func TestCreateInstance(t *testing.T) { g.Expect(len(instance.Addresses)).To(Equal(3)) for _, address := range instance.Addresses { - if address.Type == clusterv1.MachineInternalIP { + if address.Type == clusterv1beta1.MachineInternalIP { g.Expect(address.Address).To(Equal("192.168.1.10")) } - if address.Type == clusterv1.MachineInternalDNS { + if address.Type == clusterv1beta1.MachineInternalDNS { g.Expect(address.Address).To(Or(Equal("ip-192-168-1-10.ec2.internal"), Equal("ip-192-168-1-10.example.com"))) } } @@ -5352,7 +5355,6 @@ func TestCreateInstance(t *testing.T) { awsCluster: &infrav1.AWSCluster{ ObjectMeta: metav1.ObjectMeta{Name: "test"}, Spec: infrav1.AWSClusterSpec{ - NetworkSpec: infrav1.NetworkSpec{ Subnets: infrav1.Subnets{ infrav1.SubnetSpec{ @@ -5557,7 +5559,6 @@ func TestCreateInstance(t *testing.T) { awsCluster: &infrav1.AWSCluster{ ObjectMeta: metav1.ObjectMeta{Name: "test"}, Spec: infrav1.AWSClusterSpec{ - NetworkSpec: infrav1.NetworkSpec{ Subnets: 
infrav1.Subnets{ infrav1.SubnetSpec{ @@ -5678,7 +5679,6 @@ func TestCreateInstance(t *testing.T) { awsCluster: &infrav1.AWSCluster{ ObjectMeta: metav1.ObjectMeta{Name: "test"}, Spec: infrav1.AWSClusterSpec{ - NetworkSpec: infrav1.NetworkSpec{ Subnets: infrav1.Subnets{ infrav1.SubnetSpec{ @@ -5905,256 +5905,6 @@ func TestCreateInstance(t *testing.T) { } }, }, - { - name: "with AMD SEV-SNP disabled", - machine: &clusterv1.Machine{ - ObjectMeta: metav1.ObjectMeta{ - Labels: map[string]string{"set": "node"}, - }, - Spec: clusterv1.MachineSpec{ - Bootstrap: clusterv1.Bootstrap{ - DataSecretName: ptr.To[string]("bootstrap-data"), - }, - }, - }, - machineConfig: &infrav1.AWSMachineSpec{ - AMI: infrav1.AMIReference{ - ID: aws.String("abc"), - }, - InstanceType: "m6a.large", - CPUOptions: infrav1.CPUOptions{ - ConfidentialCompute: infrav1.AWSConfidentialComputePolicy("Disabled"), - }, - }, - awsCluster: &infrav1.AWSCluster{ - ObjectMeta: metav1.ObjectMeta{Name: "test"}, - Spec: infrav1.AWSClusterSpec{ - NetworkSpec: infrav1.NetworkSpec{ - Subnets: infrav1.Subnets{ - infrav1.SubnetSpec{ - ID: "subnet-1", - IsPublic: false, - }, - infrav1.SubnetSpec{ - IsPublic: false, - }, - }, - VPC: infrav1.VPCSpec{ - ID: "vpc-test", - }, - }, - }, - Status: infrav1.AWSClusterStatus{ - Network: infrav1.NetworkStatus{ - SecurityGroups: map[infrav1.SecurityGroupRole]infrav1.SecurityGroup{ - infrav1.SecurityGroupControlPlane: { - ID: "1", - }, - infrav1.SecurityGroupNode: { - ID: "2", - }, - infrav1.SecurityGroupLB: { - ID: "3", - }, - }, - APIServerELB: infrav1.LoadBalancer{ - DNSName: "test-apiserver.us-east-1.aws", - }, - }, - }, - }, - expect: func(m *mocks.MockEC2APIMockRecorder) { - m. - DescribeInstanceTypes(context.TODO(), gomock.Eq(&ec2.DescribeInstanceTypesInput{ - InstanceTypes: []types.InstanceType{ - types.InstanceTypeM6aLarge, - }, - })). - Return(&ec2.DescribeInstanceTypesOutput{ - InstanceTypes: []types.InstanceTypeInfo{ - { - ProcessorInfo: &types.ProcessorInfo{ - SupportedArchitectures: []types.ArchitectureType{ - types.ArchitectureTypeX8664, - }, - }, - }, - }, - }, nil) - m. // TODO: Restore these parameters, but with the tags as well - RunInstances(context.TODO(), gomock.Any()). - DoAndReturn(func(ctx context.Context, input *ec2.RunInstancesInput, optFns ...func(*ec2.Options)) (*ec2.RunInstancesOutput, error) { - if input.CpuOptions == nil { - t.Fatalf("expected AMD SEV-SNP to be disabled, but got no CpuOptions") - } else if input.CpuOptions.AmdSevSnp != types.AmdSevSnpSpecificationDisabled { - t.Fatalf("expected AMD SEV-SNP to be disabled, but got %s", input.CpuOptions.AmdSevSnp) - } - return &ec2.RunInstancesOutput{ - Instances: []types.Instance{ - { - State: &types.InstanceState{ - Name: types.InstanceStateNamePending, - }, - IamInstanceProfile: &types.IamInstanceProfile{ - Arn: aws.String("arn:aws:iam::123456789012:instance-profile/foo"), - }, - InstanceId: aws.String("two"), - InstanceType: types.InstanceTypeM5Large, - SubnetId: aws.String("subnet-1"), - ImageId: aws.String("ami-1"), - RootDeviceName: aws.String("device-1"), - BlockDeviceMappings: []types.InstanceBlockDeviceMapping{ - { - DeviceName: aws.String("device-1"), - Ebs: &types.EbsInstanceBlockDevice{ - VolumeId: aws.String("volume-1"), - }, - }, - }, - Placement: &types.Placement{ - AvailabilityZone: &az, - }, - }, - }, - }, nil - }) - m. - DescribeNetworkInterfaces(context.TODO(), gomock.Any()). 
- Return(&ec2.DescribeNetworkInterfacesOutput{ - NetworkInterfaces: []types.NetworkInterface{}, - NextToken: nil, - }, nil) - }, - check: func(instance *infrav1.Instance, err error) { - if err != nil { - t.Fatalf("did not expect error: %v", err) - } - }, - }, - { - name: "with AMD SEV-SNP unspecified", - machine: &clusterv1.Machine{ - ObjectMeta: metav1.ObjectMeta{ - Labels: map[string]string{"set": "node"}, - }, - Spec: clusterv1.MachineSpec{ - Bootstrap: clusterv1.Bootstrap{ - DataSecretName: ptr.To[string]("bootstrap-data"), - }, - }, - }, - machineConfig: &infrav1.AWSMachineSpec{ - AMI: infrav1.AMIReference{ - ID: aws.String("abc"), - }, - InstanceType: "m6a.large", - CPUOptions: infrav1.CPUOptions{ - ConfidentialCompute: "", - }, - }, - awsCluster: &infrav1.AWSCluster{ - ObjectMeta: metav1.ObjectMeta{Name: "test"}, - Spec: infrav1.AWSClusterSpec{ - NetworkSpec: infrav1.NetworkSpec{ - Subnets: infrav1.Subnets{ - infrav1.SubnetSpec{ - ID: "subnet-1", - IsPublic: false, - }, - infrav1.SubnetSpec{ - IsPublic: false, - }, - }, - VPC: infrav1.VPCSpec{ - ID: "vpc-test", - }, - }, - }, - Status: infrav1.AWSClusterStatus{ - Network: infrav1.NetworkStatus{ - SecurityGroups: map[infrav1.SecurityGroupRole]infrav1.SecurityGroup{ - infrav1.SecurityGroupControlPlane: { - ID: "1", - }, - infrav1.SecurityGroupNode: { - ID: "2", - }, - infrav1.SecurityGroupLB: { - ID: "3", - }, - }, - APIServerELB: infrav1.LoadBalancer{ - DNSName: "test-apiserver.us-east-1.aws", - }, - }, - }, - }, - expect: func(m *mocks.MockEC2APIMockRecorder) { - m. - DescribeInstanceTypes(context.TODO(), gomock.Eq(&ec2.DescribeInstanceTypesInput{ - InstanceTypes: []types.InstanceType{ - types.InstanceTypeM6aLarge, - }, - })). - Return(&ec2.DescribeInstanceTypesOutput{ - InstanceTypes: []types.InstanceTypeInfo{ - { - ProcessorInfo: &types.ProcessorInfo{ - SupportedArchitectures: []types.ArchitectureType{ - types.ArchitectureTypeX8664, - }, - }, - }, - }, - }, nil) - m. // TODO: Restore these parameters, but with the tags as well - RunInstances(context.TODO(), gomock.Any()). - DoAndReturn(func(ctx context.Context, input *ec2.RunInstancesInput, optFns ...func(*ec2.Options)) (*ec2.RunInstancesOutput, error) { - if input.CpuOptions != nil { - t.Fatalf("expected no CpuOptions, but got %+v", input.CpuOptions) - } - return &ec2.RunInstancesOutput{ - Instances: []types.Instance{ - { - State: &types.InstanceState{ - Name: types.InstanceStateNamePending, - }, - IamInstanceProfile: &types.IamInstanceProfile{ - Arn: aws.String("arn:aws:iam::123456789012:instance-profile/foo"), - }, - InstanceId: aws.String("two"), - InstanceType: types.InstanceTypeM5Large, - SubnetId: aws.String("subnet-1"), - ImageId: aws.String("ami-1"), - RootDeviceName: aws.String("device-1"), - BlockDeviceMappings: []types.InstanceBlockDeviceMapping{ - { - DeviceName: aws.String("device-1"), - Ebs: &types.EbsInstanceBlockDevice{ - VolumeId: aws.String("volume-1"), - }, - }, - }, - Placement: &types.Placement{ - AvailabilityZone: &az, - }, - }, - }, - }, nil - }) - m. - DescribeNetworkInterfaces(context.TODO(), gomock.Any()). 
- Return(&ec2.DescribeNetworkInterfacesOutput{ - NetworkInterfaces: []types.NetworkInterface{}, - NextToken: nil, - }, nil) - }, - check: func(instance *infrav1.Instance, err error) { - if err != nil { - t.Fatalf("did not expect error: %v", err) - } - }, - }, } for _, tc := range testcases { t.Run(tc.name, func(t *testing.T) { @@ -6171,12 +5921,12 @@ func TestCreateInstance(t *testing.T) { Name: "test1", }, Spec: clusterv1.ClusterSpec{ - ClusterNetwork: &clusterv1.ClusterNetwork{ + ClusterNetwork: clusterv1.ClusterNetwork{ ServiceDomain: "cluster.local", - Services: &clusterv1.NetworkRanges{ + Services: clusterv1.NetworkRanges{ CIDRBlocks: []string{"192.168.0.0/16"}, }, - Pods: &clusterv1.NetworkRanges{ + Pods: clusterv1.NetworkRanges{ CIDRBlocks: []string{"192.168.0.0/16"}, }, }, @@ -6766,3 +6516,45 @@ func TestGetCapacityReservationSpecification(t *testing.T) { }) } } + +func TestGetInstanceCPUOptionsRequest(t *testing.T) { + testCases := []struct { + name string + cpuOptions infrav1.CPUOptions + expectedRequest *types.CpuOptionsRequest + }{ + { + name: "with ConfidentialCompute set to AMD SEV-SNP", + cpuOptions: infrav1.CPUOptions{ + ConfidentialCompute: infrav1.AWSConfidentialComputePolicy("AMDEncryptedVirtualizationNestedPaging"), + }, + expectedRequest: &types.CpuOptionsRequest{ + AmdSevSnp: types.AmdSevSnpSpecificationEnabled, + }, + }, + { + name: "with ConfidentialCompute disabled", + cpuOptions: infrav1.CPUOptions{ + ConfidentialCompute: infrav1.AWSConfidentialComputePolicy("Disabled"), + }, + expectedRequest: &types.CpuOptionsRequest{ + AmdSevSnp: types.AmdSevSnpSpecificationDisabled, + }, + }, + { + name: "with ConfidentialCompute empty", + cpuOptions: infrav1.CPUOptions{ + ConfidentialCompute: "", + }, + expectedRequest: nil, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + request := getInstanceCPUOptionsRequest(tc.cpuOptions) + g := NewWithT(t) + g.Expect(request).To(Equal(tc.expectedRequest)) + }) + } +} diff --git a/pkg/cloud/services/ec2/launchtemplate.go b/pkg/cloud/services/ec2/launchtemplate.go index c805a00e7a..4a1c652fc6 100644 --- a/pkg/cloud/services/ec2/launchtemplate.go +++ b/pkg/cloud/services/ec2/launchtemplate.go @@ -23,8 +23,10 @@ import ( "sort" "strconv" "strings" + "time" "github.com/aws/aws-sdk-go-v2/aws" + autoscalingtypes "github.com/aws/aws-sdk-go-v2/service/autoscaling/types" "github.com/aws/aws-sdk-go-v2/service/ec2" "github.com/aws/aws-sdk-go-v2/service/ec2/types" "github.com/blang/semver" @@ -35,6 +37,7 @@ import ( corev1 "k8s.io/api/core/v1" apimachinerytypes "k8s.io/apimachinery/pkg/types" "k8s.io/utils/ptr" + ctrl "sigs.k8s.io/controller-runtime" infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2" @@ -45,8 +48,8 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/userdata" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/record" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/utils" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - "sigs.k8s.io/cluster-api/util/conditions" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + v1beta1conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" ) const ( @@ -70,41 +73,42 @@ func (s *Service) ReconcileLaunchTemplate( s3Scope scope.S3Scope, ec2svc services.EC2Interface, objectStoreSvc services.ObjectStoreInterface, - canUpdateLaunchTemplate func() (bool, error), + canStartInstanceRefresh func() (bool, *autoscalingtypes.InstanceRefreshStatus, error), + 
cancelInstanceRefresh func() error, runPostLaunchTemplateUpdateOperation func() error, -) error { +) (*ctrl.Result, error) { bootstrapData, bootstrapDataFormat, bootstrapDataSecretKey, err := scope.GetRawBootstrapData() if err != nil { record.Eventf(scope.GetMachinePool(), corev1.EventTypeWarning, "FailedGetBootstrapData", err.Error()) - return err + return nil, err } scope.Info("checking for existing launch template") launchTemplate, launchTemplateUserDataHash, launchTemplateUserDataSecretKey, _, err := ec2svc.GetLaunchTemplate(scope.LaunchTemplateName()) if err != nil { - conditions.MarkUnknown(scope.GetSetter(), expinfrav1.LaunchTemplateReadyCondition, expinfrav1.LaunchTemplateNotFoundReason, "%s", err.Error()) - return err + v1beta1conditions.MarkUnknown(scope.GetSetter(), expinfrav1.LaunchTemplateReadyCondition, expinfrav1.LaunchTemplateNotFoundReason, "%s", err.Error()) + return nil, err } imageID, err := ec2svc.DiscoverLaunchTemplateAMI(ctx, scope) if err != nil { - conditions.MarkFalse(scope.GetSetter(), expinfrav1.LaunchTemplateReadyCondition, expinfrav1.LaunchTemplateCreateFailedReason, clusterv1.ConditionSeverityError, "%s", err.Error()) - return err + v1beta1conditions.MarkFalse(scope.GetSetter(), expinfrav1.LaunchTemplateReadyCondition, expinfrav1.LaunchTemplateCreateFailedReason, clusterv1beta1.ConditionSeverityError, "%s", err.Error()) + return nil, err } - var ignitionStorageType = infrav1.DefaultMachinePoolIgnitionStorageType + ignitionStorageType := infrav1.DefaultMachinePoolIgnitionStorageType if ignition := ignitionScope.Ignition(); ignition != nil { ignitionStorageType = ignition.StorageType } var userDataForLaunchTemplate []byte if bootstrapDataFormat == "ignition" && ignitionStorageType == infrav1.IgnitionStorageTypeOptionClusterObjectStore { - var ignitionVersion = infrav1.DefaultIgnitionVersion + ignitionVersion := infrav1.DefaultIgnitionVersion if ignition := ignitionScope.Ignition(); ignition != nil { ignitionVersion = ignition.Version } if s3Scope.Bucket() == nil { - return errors.New("using Ignition with `AWSMachinePool.spec.ignition.storageType=ClusterObjectStore` " + + return nil, errors.New("using Ignition with `AWSMachinePool.spec.ignition.storageType=ClusterObjectStore` " + "requires a cluster wide object storage configured at `AWSCluster.spec.s3Bucket`") } @@ -116,17 +120,16 @@ func (s *Service) ReconcileLaunchTemplate( // Previously, user data was always written into the launch template, so we check // `AWSMachinePool.Spec.Ignition != nil` to toggle the S3 feature on for `AWSMachinePool` objects. 
objectURL, err := objectStoreSvc.CreateForMachinePool(ctx, scope, bootstrapData)
 		if err != nil {
-			conditions.MarkFalse(scope.GetSetter(), expinfrav1.LaunchTemplateReadyCondition, expinfrav1.LaunchTemplateReconcileFailedReason, clusterv1.ConditionSeverityError, "%s", err.Error())
-			return err
+			v1beta1conditions.MarkFalse(scope.GetSetter(), expinfrav1.LaunchTemplateReadyCondition, expinfrav1.LaunchTemplateReconcileFailedReason, clusterv1beta1.ConditionSeverityError, "%s", err.Error())
+			return nil, err
 		}
 
 		semver, err := semver.ParseTolerant(ignitionVersion)
 		if err != nil {
 			err = errors.Wrapf(err, "failed to parse ignition version %q", ignitionVersion)
-			conditions.MarkFalse(scope.GetSetter(), expinfrav1.LaunchTemplateReadyCondition, expinfrav1.LaunchTemplateReconcileFailedReason, clusterv1.ConditionSeverityError, "%s", err.Error())
-			return err
+			v1beta1conditions.MarkFalse(scope.GetSetter(), expinfrav1.LaunchTemplateReadyCondition, expinfrav1.LaunchTemplateReconcileFailedReason, clusterv1beta1.ConditionSeverityError, "%s", err.Error())
+			return nil, err
 		}
 
 		// EC2 user data points to S3
@@ -148,8 +151,8 @@
 			userDataForLaunchTemplate, err = json.Marshal(ignData)
 			if err != nil {
 				err = errors.Wrap(err, "failed to convert ignition config to JSON")
-				conditions.MarkFalse(scope.GetSetter(), expinfrav1.LaunchTemplateReadyCondition, expinfrav1.LaunchTemplateReconcileFailedReason, clusterv1.ConditionSeverityError, "%s", err.Error())
-				return err
+				v1beta1conditions.MarkFalse(scope.GetSetter(), expinfrav1.LaunchTemplateReadyCondition, expinfrav1.LaunchTemplateReconcileFailedReason, clusterv1beta1.ConditionSeverityError, "%s", err.Error())
+				return nil, err
 			}
 		case 3:
 			ignData := &ignV3Types.Config{
@@ -168,13 +171,13 @@
 			userDataForLaunchTemplate, err = json.Marshal(ignData)
 			if err != nil {
 				err = errors.Wrap(err, "failed to convert ignition config to JSON")
-				conditions.MarkFalse(scope.GetSetter(), expinfrav1.LaunchTemplateReadyCondition, expinfrav1.LaunchTemplateReconcileFailedReason, clusterv1.ConditionSeverityError, "%s", err.Error())
-				return err
+				v1beta1conditions.MarkFalse(scope.GetSetter(), expinfrav1.LaunchTemplateReadyCondition, expinfrav1.LaunchTemplateReconcileFailedReason, clusterv1beta1.ConditionSeverityError, "%s", err.Error())
+				return nil, err
 			}
 		default:
 			err = errors.Errorf("unsupported ignition version %q", ignitionVersion)
-			conditions.MarkFalse(scope.GetSetter(), expinfrav1.LaunchTemplateReadyCondition, expinfrav1.LaunchTemplateReconcileFailedReason, clusterv1.ConditionSeverityError, "%s", err.Error())
-			return err
+			v1beta1conditions.MarkFalse(scope.GetSetter(), expinfrav1.LaunchTemplateReadyCondition, expinfrav1.LaunchTemplateReconcileFailedReason, clusterv1beta1.ConditionSeverityError, "%s", err.Error())
+			return nil, err
 		}
 	} else {
 		// S3 bucket not used, so the bootstrap data is stored directly in the launch template
@@ -188,12 +191,12 @@
 		scope.Info("no existing launch template found, creating")
 		launchTemplateID, err := ec2svc.CreateLaunchTemplate(scope, imageID, *bootstrapDataSecretKey, userDataForLaunchTemplate, userdata.ComputeHash(bootstrapData))
 		if err != nil {
-			conditions.MarkFalse(scope.GetSetter(), expinfrav1.LaunchTemplateReadyCondition, expinfrav1.LaunchTemplateCreateFailedReason, clusterv1.ConditionSeverityError, "%s", err.Error())
-			return err
+			v1beta1conditions.MarkFalse(scope.GetSetter(), expinfrav1.LaunchTemplateReadyCondition, expinfrav1.LaunchTemplateCreateFailedReason, clusterv1beta1.ConditionSeverityError, "%s", err.Error())
+			return nil, err
 		}
 
 		scope.SetLaunchTemplateIDStatus(launchTemplateID)
-		return scope.PatchObject()
+		return nil, scope.PatchObject()
 	}
 
 	// LaunchTemplateID is set during LaunchTemplate creation, but for a scenario such as `clusterctl move`, status fields become blank.
@@ -201,28 +204,30 @@
 	if scope.GetLaunchTemplateIDStatus() == "" {
 		launchTemplateID, err := ec2svc.GetLaunchTemplateID(scope.LaunchTemplateName())
 		if err != nil {
-			conditions.MarkUnknown(scope.GetSetter(), expinfrav1.LaunchTemplateReadyCondition, expinfrav1.LaunchTemplateNotFoundReason, "%s", err.Error())
-			return err
+			v1beta1conditions.MarkUnknown(scope.GetSetter(), expinfrav1.LaunchTemplateReadyCondition, expinfrav1.LaunchTemplateNotFoundReason, "%s", err.Error())
+			return nil, err
 		}
 		scope.SetLaunchTemplateIDStatus(launchTemplateID)
-		return scope.PatchObject()
+		if err = scope.PatchObject(); err != nil {
+			return nil, err
+		}
 	}
 
 	if scope.GetLaunchTemplateLatestVersionStatus() == "" {
 		launchTemplateVersion, err := ec2svc.GetLaunchTemplateLatestVersion(scope.GetLaunchTemplateIDStatus())
 		if err != nil {
-			conditions.MarkUnknown(scope.GetSetter(), expinfrav1.LaunchTemplateReadyCondition, expinfrav1.LaunchTemplateNotFoundReason, "%s", err.Error())
-			return err
+			v1beta1conditions.MarkUnknown(scope.GetSetter(), expinfrav1.LaunchTemplateReadyCondition, expinfrav1.LaunchTemplateNotFoundReason, "%s", err.Error())
+			return nil, err
 		}
 		scope.SetLaunchTemplateLatestVersionStatus(launchTemplateVersion)
-		if err := scope.PatchObject(); err != nil {
-			return err
+		if err = scope.PatchObject(); err != nil {
+			return nil, err
 		}
 	}
 
 	annotation, err := MachinePoolAnnotationJSON(scope, TagsLastAppliedAnnotation)
 	if err != nil {
-		return err
+		return nil, err
 	}
 
 	// Check if the instance tags were changed. If they were, create a new LaunchTemplate.
@@ -230,7 +235,7 @@
 	needsUpdate, err := ec2svc.LaunchTemplateNeedsUpdate(scope, scope.GetLaunchTemplate(), launchTemplate)
 	if err != nil {
-		return err
+		return nil, err
 	}
 
 	amiChanged := *imageID != *launchTemplate.AMI.ID
@@ -244,13 +249,29 @@
 	launchTemplateNeedsUserDataSecretKeyTag := launchTemplateUserDataSecretKey == nil
 
 	if needsUpdate || tagsChanged || amiChanged || userDataSecretKeyChanged {
-		canUpdate, err := canUpdateLaunchTemplate()
+		// More than just the bootstrap token changed
+
+		canStartRefresh, unfinishedRefreshStatus, err := canStartInstanceRefresh()
 		if err != nil {
-			return err
+			return nil, err
 		}
-		if !canUpdate {
-			conditions.MarkFalse(scope.GetSetter(), expinfrav1.PreLaunchTemplateUpdateCheckCondition, expinfrav1.PreLaunchTemplateUpdateCheckFailedReason, clusterv1.ConditionSeverityWarning, "")
-			return errors.New("Cannot update the launch template, prerequisite not met")
+		if !canStartRefresh {
+			if unfinishedRefreshStatus != nil && *unfinishedRefreshStatus != autoscalingtypes.InstanceRefreshStatusCancelling {
+				// Until the previous instance refresh goes into `Cancelled` state
+				// asynchronously, allowing another refresh to be started,
+				// defer the reconciliation. Otherwise, we would get an
+				// `ErrCodeInstanceRefreshInProgressFault` error if we tried to
+				// start an instance refresh immediately.
+ scope.Info("Cancelling previous instance refresh and delaying reconciliation until the next one can be started", "unfinishedRefreshStatus", unfinishedRefreshStatus) + + err := cancelInstanceRefresh() + if err != nil { + return nil, err + } + } else { + scope.Info("Existing instance refresh is not finished, delaying reconciliation until the next one can be started", "unfinishedRefreshStatus", unfinishedRefreshStatus) + } + return &ctrl.Result{RequeueAfter: 30 * time.Second}, nil } } @@ -265,7 +286,7 @@ func (s *Service) ReconcileLaunchTemplate( // We ensure that the number of versions does not grow without bound by following a simple rule: Before we create a new version, we delete one old version, if there is at least one old version that is not in use. deletedLaunchTemplateVersion, err := ec2svc.PruneLaunchTemplateVersions(scope.GetLaunchTemplateIDStatus()) if err != nil { - return err + return nil, err } // S3 objects should be deleted as soon as possible if they're not used @@ -274,14 +295,13 @@ func (s *Service) ReconcileLaunchTemplate( if feature.Gates.Enabled(feature.MachinePool) && deletedLaunchTemplateVersion != nil { _, _, _, deletedLaunchTemplateVersionBootstrapDataHash, err := s.SDKToLaunchTemplate(*deletedLaunchTemplateVersion) if err != nil { - return err + return nil, err } if deletedLaunchTemplateVersionBootstrapDataHash != nil && s3Scope.Bucket() != nil && bootstrapDataFormat == "ignition" && ignitionStorageType == infrav1.IgnitionStorageTypeOptionClusterObjectStore { scope.Info("Deleting S3 object for deleted launch template version", "version", *deletedLaunchTemplateVersion.VersionNumber) err = objectStoreSvc.DeleteForMachinePool(ctx, scope, *deletedLaunchTemplateVersionBootstrapDataHash) - // If any error happened above, log it and continue if err != nil { scope.Error(err, "Failed to delete S3 object for deleted launch template version, continuing because the bucket lifecycle policy will clean it later", "version", *deletedLaunchTemplateVersion.VersionNumber) @@ -290,28 +310,28 @@ func (s *Service) ReconcileLaunchTemplate( } if err := ec2svc.CreateLaunchTemplateVersion(scope.GetLaunchTemplateIDStatus(), scope, imageID, *bootstrapDataSecretKey, userDataForLaunchTemplate, userdata.ComputeHash(bootstrapData)); err != nil { - return err + return nil, err } version, err := ec2svc.GetLaunchTemplateLatestVersion(scope.GetLaunchTemplateIDStatus()) if err != nil { - return err + return nil, err } scope.SetLaunchTemplateLatestVersionStatus(version) if err := scope.PatchObject(); err != nil { - return err + return nil, err } } if needsUpdate || tagsChanged || amiChanged || userDataSecretKeyChanged { if err := runPostLaunchTemplateUpdateOperation(); err != nil { - conditions.MarkFalse(scope.GetSetter(), expinfrav1.PostLaunchTemplateUpdateOperationCondition, expinfrav1.PostLaunchTemplateUpdateOperationFailedReason, clusterv1.ConditionSeverityError, "%s", err.Error()) - return err + v1beta1conditions.MarkFalse(scope.GetSetter(), expinfrav1.PostLaunchTemplateUpdateOperationCondition, expinfrav1.PostLaunchTemplateUpdateOperationFailedReason, clusterv1beta1.ConditionSeverityError, "%s", err.Error()) + return nil, err } - conditions.MarkTrue(scope.GetSetter(), expinfrav1.PostLaunchTemplateUpdateOperationCondition) + v1beta1conditions.MarkTrue(scope.GetSetter(), expinfrav1.PostLaunchTemplateUpdateOperationCondition) } - return nil + return nil, nil } // ReconcileTags reconciles the tags for the AWSMachinePool instances. 
@@ -1042,7 +1062,7 @@ func (s *Service) DiscoverLaunchTemplateAMI(ctx context.Context, scope scope.Lau } templateVersion := scope.GetMachinePool().Spec.Template.Spec.Version - if templateVersion == nil { + if templateVersion == "" { err := errors.New("Either AWSMachinePool's spec.awslaunchtemplate.ami.id or MachinePool's spec.template.spec.version must be defined") s.scope.Error(err, "") return nil, err @@ -1083,7 +1103,7 @@ func (s *Service) DiscoverLaunchTemplateAMI(ctx context.Context, scope scope.Lau if scope.IsEKSManaged() && imageLookupFormat == "" && imageLookupOrg == "" && imageLookupBaseOS == "" { lookupAMI, err = s.eksAMILookup( ctx, - *templateVersion, + templateVersion, imageArchitecture, scope.GetLaunchTemplate().AMI.EKSOptimizedLookupType, ) @@ -1096,7 +1116,7 @@ func (s *Service) DiscoverLaunchTemplateAMI(ctx context.Context, scope scope.Lau imageLookupOrg, imageLookupBaseOS, imageArchitecture, - *templateVersion, + templateVersion, ) if err != nil { return nil, err diff --git a/pkg/cloud/services/ec2/launchtemplate_test.go b/pkg/cloud/services/ec2/launchtemplate_test.go index fd4ff8c81a..cb3b53ad77 100644 --- a/pkg/cloud/services/ec2/launchtemplate_test.go +++ b/pkg/cloud/services/ec2/launchtemplate_test.go @@ -43,7 +43,7 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/ssm/mock_ssmiface" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/userdata" "sigs.k8s.io/cluster-api-provider-aws/v2/test/mocks" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" ) const ( @@ -82,8 +82,10 @@ users: var testUserDataHash = userdata.ComputeHash([]byte(testUserData)) -var testBootstrapData = []byte("different from testUserData since bootstrap data may be in S3 while EC2 user data points to that S3 object") -var testBootstrapDataHash = userdata.ComputeHash(testBootstrapData) +var ( + testBootstrapData = []byte("different from testUserData since bootstrap data may be in S3 while EC2 user data points to that S3 object") + testBootstrapDataHash = userdata.ComputeHash(testBootstrapData) +) func defaultEC2AndDataTags(name string, clusterName string, userDataSecretKey types.NamespacedName, bootstrapDataHash string) []ec2types.Tag { tags := defaultEC2Tags(name, clusterName) @@ -1800,7 +1802,7 @@ func TestDiscoverLaunchTemplateAMI(t *testing.T) { }, machineTemplate: clusterv1.MachineTemplateSpec{ Spec: clusterv1.MachineSpec{ - Version: aws.String(DefaultAmiNameFormat), + Version: DefaultAmiNameFormat, }, }, expect: func(m *mocks.MockEC2APIMockRecorder) { @@ -1851,7 +1853,7 @@ func TestDiscoverLaunchTemplateAMI(t *testing.T) { }, machineTemplate: clusterv1.MachineTemplateSpec{ Spec: clusterv1.MachineSpec{ - Version: aws.String(DefaultAmiNameFormat), + Version: DefaultAmiNameFormat, }, }, expect: func(m *mocks.MockEC2APIMockRecorder) { @@ -1902,7 +1904,7 @@ func TestDiscoverLaunchTemplateAMI(t *testing.T) { }, machineTemplate: clusterv1.MachineTemplateSpec{ Spec: clusterv1.MachineSpec{ - Version: aws.String(DefaultAmiNameFormat), + Version: DefaultAmiNameFormat, }, }, expect: func(m *mocks.MockEC2APIMockRecorder) { @@ -1974,7 +1976,7 @@ func TestDiscoverLaunchTemplateAMI(t *testing.T) { }, machineTemplate: clusterv1.MachineTemplateSpec{ Spec: clusterv1.MachineSpec{ - Version: aws.String(DefaultAmiNameFormat), + Version: DefaultAmiNameFormat, }, }, expect: func(m *mocks.MockEC2APIMockRecorder) { diff --git a/pkg/cloud/services/ec2/service.go b/pkg/cloud/services/ec2/service.go index e9bd12c79d..fc237e1991 100644 --- 
a/pkg/cloud/services/ec2/service.go
+++ b/pkg/cloud/services/ec2/service.go
@@ -34,6 +34,10 @@ type Service struct {
 
 	// SSMClient is used to look up the official EKS AMI ID
 	SSMClient ssm.SSMAPI
+
+	// RetryEC2Client is used for dedicated host operations with enhanced retry configuration.
+	// If nil, a new retry client will be created as needed.
+	RetryEC2Client common.EC2API
 }
 
 // NewService returns a new service given the ec2 api client.
diff --git a/pkg/cloud/services/eks/cluster.go b/pkg/cloud/services/eks/cluster.go
index a16e6d1d34..403051510d 100644
--- a/pkg/cloud/services/eks/cluster.go
+++ b/pkg/cloud/services/eks/cluster.go
@@ -20,6 +20,7 @@ import (
 	"context"
 	"fmt"
 	"net"
+	"strings"
 	"time"
 
 	"github.com/aws/aws-sdk-go-v2/aws"
@@ -35,13 +36,15 @@ import (
 	infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
 	ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2"
 	"sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/awserrors"
+	"sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/converters"
 	"sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/wait"
 	"sigs.k8s.io/cluster-api-provider-aws/v2/pkg/internal/cidr"
 	"sigs.k8s.io/cluster-api-provider-aws/v2/pkg/internal/cmp"
 	"sigs.k8s.io/cluster-api-provider-aws/v2/pkg/internal/tristate"
 	"sigs.k8s.io/cluster-api-provider-aws/v2/pkg/record"
-	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
-	"sigs.k8s.io/cluster-api/util/conditions"
+	clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
+	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2"
+	v1beta1conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions"
 )
 
 func (s *Service) reconcileCluster(ctx context.Context) error {
@@ -96,7 +99,7 @@
 
 	s.scope.Debug("EKS Control Plane active", "endpoint", *cluster.Endpoint)
 
-	s.scope.ControlPlane.Spec.ControlPlaneEndpoint = clusterv1.APIEndpoint{
+	s.scope.ControlPlane.Spec.ControlPlaneEndpoint = clusterv1beta1.APIEndpoint{
 		Host: *cluster.Endpoint,
 		Port: 443,
 	}
@@ -121,6 +124,10 @@
 		return errors.Wrap(err, "failed reconciling cluster config")
 	}
 
+	if err := s.reconcileAccessConfig(ctx, cluster.AccessConfig); err != nil {
+		return errors.Wrap(err, "failed reconciling access config")
+	}
+
 	if err := s.reconcileLogging(ctx, cluster.Logging); err != nil {
 		return errors.Wrap(err, "failed reconciling logging")
 	}
@@ -143,23 +150,7 @@
 // computeCurrentStatusVersion returns the computed current EKS cluster kubernetes version.
 // The computation has awareness of the fact that EKS clusters only return a major.minor kubernetes version,
 // and returns a compatible version for the status according to the one the user specified in the spec.
-func computeCurrentStatusVersion(specV *string, clusterV *string) *string {
-	specVersion := ""
-	if specV != nil {
-		specVersion = *specV
-	}
-
-	clusterVersion := ""
-	if clusterV != nil {
-		clusterVersion = *clusterV
-	}
-
-	// Ignore parsing errors as these are already validated by the kubebuilder validation and the AWS API.
-	// Also specVersion might not be specified in the spec.Version for AWSManagedControlPlane, this results in a "0.0.0" version.
-	// Also clusterVersion might not yet be returned by the AWS EKS API, as the cluster might still be initializing, this results in a "0.0.0" version.
- specSemverVersion, _ := semver.ParseTolerant(specVersion) - currentSemverVersion, _ := semver.ParseTolerant(clusterVersion) - +func computeCurrentStatusVersion(clusterV *string, specSemverVersion semver.Version, currentSemverVersion semver.Version) *string { // If AWS EKS API is not returning a version, set the status.Version to empty string. if currentSemverVersion.String() == "0.0.0" { return ptr.To("") } @@ -183,9 +174,27 @@ func computeCurrentStatusVersion(specV *string, clusterV *string) *string { return clusterV } +// parseClusterVersionString parses a version string into a semver version. +// If the string cannot be parsed as semver, a "0.0.0" version is returned. +func parseClusterVersionString(str *string) semver.Version { + version := "" + if str != nil { + version = *str + } + + // Ignore parsing errors as these are already validated by the kubebuilder validation and the AWS API. + semverVersion, _ := semver.ParseTolerant(version) + return semverVersion +} + func (s *Service) setStatus(cluster *ekstypes.Cluster) error { + // specSemver might not be specified in the spec.Version for AWSManagedControlPlane; this results in a "0.0.0" version. + specSemver := parseClusterVersionString(s.scope.ControlPlane.Spec.Version) + // clusterSemver might not yet be returned by the AWS EKS API, as the cluster might still be initializing; this results in a "0.0.0" version. + clusterSemver := parseClusterVersionString(cluster.Version) + // Set the current Kubernetes control plane version in the status. - s.scope.ControlPlane.Status.Version = computeCurrentStatusVersion(s.scope.ControlPlane.Spec.Version, cluster.Version) + s.scope.ControlPlane.Status.Version = computeCurrentStatusVersion(cluster.Version, specSemver, clusterSemver) // Set the current cluster status in the control plane status.
switch cluster.Status { @@ -199,14 +208,27 @@ func (s *Service) setStatus(cluster *ekstypes.Cluster) error { case ekstypes.ClusterStatusActive: s.scope.ControlPlane.Status.Ready = true s.scope.ControlPlane.Status.FailureMessage = nil - if conditions.IsTrue(s.scope.ControlPlane, ekscontrolplanev1.EKSControlPlaneCreatingCondition) { + if v1beta1conditions.IsTrue(s.scope.ControlPlane, ekscontrolplanev1.EKSControlPlaneCreatingCondition) { record.Eventf(s.scope.ControlPlane, "SuccessfulCreateEKSControlPlane", "Created new EKS control plane %s", s.scope.KubernetesClusterName()) - conditions.MarkFalse(s.scope.ControlPlane, ekscontrolplanev1.EKSControlPlaneCreatingCondition, "created", clusterv1.ConditionSeverityInfo, "") + v1beta1conditions.MarkFalse(s.scope.ControlPlane, ekscontrolplanev1.EKSControlPlaneCreatingCondition, "created", clusterv1beta1.ConditionSeverityInfo, "") } - if conditions.IsTrue(s.scope.ControlPlane, ekscontrolplanev1.EKSControlPlaneUpdatingCondition) { - conditions.MarkFalse(s.scope.ControlPlane, ekscontrolplanev1.EKSControlPlaneUpdatingCondition, "updated", clusterv1.ConditionSeverityInfo, "") + if v1beta1conditions.IsTrue(s.scope.ControlPlane, ekscontrolplanev1.EKSControlPlaneUpdatingCondition) { + v1beta1conditions.MarkFalse(s.scope.ControlPlane, ekscontrolplanev1.EKSControlPlaneUpdatingCondition, "updated", clusterv1beta1.ConditionSeverityInfo, "") record.Eventf(s.scope.ControlPlane, "SuccessfulUpdateEKSControlPlane", "Updated EKS control plane %s", s.scope.KubernetesClusterName()) } + if s.scope.ControlPlane.Spec.UpgradePolicy == ekscontrolplanev1.UpgradePolicyStandard && + (specSemver.Major < clusterSemver.Major || + (specSemver.Major == clusterSemver.Major && specSemver.Minor < clusterSemver.Minor)) { + s.scope.ControlPlane.Status.Ready = false + failureMsg := fmt.Sprintf( + "EKS control plane %s was automatically upgraded to version %s because %s is out of standard support. 
"+ + "This can be fixed by changing to the version of the AWSManagedControlPlane to the one reported in the status", + s.scope.KubernetesClusterName(), + clusterSemver.String(), + specSemver.String(), + ) + s.scope.ControlPlane.Status.FailureMessage = &failureMsg + } // TODO FailureReason case ekstypes.ClusterStatusCreating: s.scope.ControlPlane.Status.Ready = false @@ -422,6 +444,20 @@ func (s *Service) createCluster(ctx context.Context, eksClusterName string) (*ek return nil, errors.Wrap(err, "couldn't create vpc config for cluster") } + var accessConfig *ekstypes.CreateAccessConfigRequest + if s.scope.ControlPlane.Spec.AccessConfig != nil && s.scope.ControlPlane.Spec.AccessConfig.AuthenticationMode != "" { + accessConfig = &ekstypes.CreateAccessConfigRequest{ + AuthenticationMode: s.scope.ControlPlane.Spec.AccessConfig.AuthenticationMode.APIValue(), + } + } + + if s.scope.ControlPlane.Spec.AccessConfig != nil && s.scope.ControlPlane.Spec.AccessConfig.BootstrapClusterCreatorAdminPermissions != nil { + if accessConfig == nil { + accessConfig = &ekstypes.CreateAccessConfigRequest{} + } + accessConfig.BootstrapClusterCreatorAdminPermissions = s.scope.ControlPlane.Spec.AccessConfig.BootstrapClusterCreatorAdminPermissions + } + var netConfig *ekstypes.KubernetesNetworkConfigRequest if s.scope.VPC().IsIPv6Enabled() { netConfig = &ekstypes.KubernetesNetworkConfigRequest{ @@ -460,17 +496,27 @@ func (s *Service) createCluster(ctx context.Context, eksClusterName string) (*ek eksVersion = &v } + var upgradePolicy *ekstypes.UpgradePolicyRequest + + if s.scope.ControlPlane.Spec.UpgradePolicy != "" { + upgradePolicy = &ekstypes.UpgradePolicyRequest{ + SupportType: converters.SupportTypeToSDK(s.scope.ControlPlane.Spec.UpgradePolicy), + } + } + bootstrapAddon := s.scope.BootstrapSelfManagedAddons() input := &eks.CreateClusterInput{ Name: aws.String(eksClusterName), Version: eksVersion, Logging: logging, + AccessConfig: accessConfig, EncryptionConfig: encryptionConfigs, ResourcesVpcConfig: vpcConfig, RoleArn: role.Arn, Tags: tags, KubernetesNetworkConfig: netConfig, BootstrapSelfManagedAddons: bootstrapAddon, + UpgradePolicy: upgradePolicy, } var out *eks.CreateClusterOutput @@ -478,7 +524,7 @@ func (s *Service) createCluster(ctx context.Context, eksClusterName string) (*ek if out, err = s.EKSClient.CreateCluster(ctx, input); err != nil { return false, err } - conditions.MarkTrue(s.scope.ControlPlane, ekscontrolplanev1.EKSControlPlaneCreatingCondition) + v1beta1conditions.MarkTrue(s.scope.ControlPlane, ekscontrolplanev1.EKSControlPlaneCreatingCondition) record.Eventf(s.scope.ControlPlane, "InitiatedCreateEKSControlPlane", "Initiated creation of a new EKS control plane %s", s.scope.KubernetesClusterName()) return true, nil }, awserrors.ResourceNotFound); err != nil { // TODO: change the error that can be retried @@ -526,12 +572,17 @@ func (s *Service) reconcileClusterConfig(ctx context.Context, cluster *ekstypes. 
input.ResourcesVpcConfig = updateVpcConfig } + if updateUpgradePolicy := s.reconcileUpgradePolicy(cluster.UpgradePolicy); updateUpgradePolicy != nil { + needsUpdate = true + input.UpgradePolicy = updateUpgradePolicy + } + if needsUpdate { if err := wait.WaitForWithRetryable(wait.NewBackoff(), func() (bool, error) { if _, err := s.EKSClient.UpdateClusterConfig(ctx, input); err != nil { return false, err } - conditions.MarkTrue(s.scope.ControlPlane, ekscontrolplanev1.EKSControlPlaneUpdatingCondition) + v1beta1conditions.MarkTrue(s.scope.ControlPlane, ekscontrolplanev1.EKSControlPlaneUpdatingCondition) record.Eventf(s.scope.ControlPlane, "InitiatedUpdateEKSControlPlane", "Initiated update of a new EKS control plane %s", s.scope.KubernetesClusterName()) return true, nil }); err != nil { @@ -542,6 +593,50 @@ func (s *Service) reconcileClusterConfig(ctx context.Context, cluster *ekstypes. return nil } +func (s *Service) reconcileAccessConfig(ctx context.Context, accessConfig *ekstypes.AccessConfigResponse) error { + input := &eks.UpdateClusterConfigInput{Name: aws.String(s.scope.KubernetesClusterName())} + + if s.scope.ControlPlane.Spec.AccessConfig == nil || s.scope.ControlPlane.Spec.AccessConfig.AuthenticationMode == "" { + return nil + } + + expectedAuthenticationMode := s.scope.ControlPlane.Spec.AccessConfig.AuthenticationMode.APIValue() + s.scope.Debug("Reconciling EKS Access Config for cluster", "cluster-name", s.scope.KubernetesClusterName(), "expected", expectedAuthenticationMode, "current", accessConfig.AuthenticationMode) + if expectedAuthenticationMode != accessConfig.AuthenticationMode { + input.AccessConfig = &ekstypes.UpdateAccessConfigRequest{ + AuthenticationMode: expectedAuthenticationMode, + } + } + + if input.AccessConfig != nil { + if err := wait.WaitForWithRetryable(wait.NewBackoff(), func() (bool, error) { + if _, err := s.EKSClient.UpdateClusterConfig(ctx, input); err != nil { + return false, err + } + + // Wait until the status transitions to UPDATING because there's a short + // window after UpdateClusterConfig returns where the cluster + // status is ACTIVE and the update would be retried. + if err := s.EKSClient.WaitUntilClusterUpdating( + ctx, + &eks.DescribeClusterInput{Name: aws.String(s.scope.KubernetesClusterName())}, + s.scope.MaxWaitActiveUpdateDelete, + ); err != nil { + return false, err + } + + v1beta1conditions.MarkTrue(s.scope.ControlPlane, ekscontrolplanev1.EKSControlPlaneUpdatingCondition) + record.Eventf(s.scope.ControlPlane, "InitiatedUpdateEKSControlPlane", "Initiated auth config update for EKS control plane %s", s.scope.KubernetesClusterName()) + return true, nil + }); err != nil { + record.Warnf(s.scope.ControlPlane, "FailedUpdateEKSControlPlane", "Failed to update EKS control plane auth config: %v", err) + return errors.Wrap(err, "failed to update EKS cluster access config") + } + } + + return nil +} + func (s *Service) reconcileLogging(ctx context.Context, logging *ekstypes.Logging) error { input := &eks.UpdateClusterConfigInput{Name: aws.String(s.scope.KubernetesClusterName())} @@ -559,7 +654,7 @@ func (s *Service) reconcileLogging(ctx context.Context, logging *ekstypes.Loggin if _, err := s.EKSClient.UpdateClusterConfig(ctx, input); err != nil { return false, err } - conditions.MarkTrue(s.scope.ControlPlane, ekscontrolplanev1.EKSControlPlaneUpdatingCondition) + v1beta1conditions.MarkTrue(s.scope.ControlPlane, ekscontrolplanev1.EKSControlPlaneUpdatingCondition) record.Eventf(s.scope.ControlPlane, "InitiatedUpdateEKSControlPlane", "Initiated logging update for
EKS control plane %s", s.scope.KubernetesClusterName()) return true, nil }); err != nil { @@ -707,7 +802,7 @@ func (s *Service) reconcileClusterVersion(ctx context.Context, cluster *ekstypes return false, err } - conditions.MarkTrue(s.scope.ControlPlane, ekscontrolplanev1.EKSControlPlaneUpdatingCondition) + v1beta1conditions.MarkTrue(s.scope.ControlPlane, ekscontrolplanev1.EKSControlPlaneUpdatingCondition) record.Eventf(s.scope.ControlPlane, "InitiatedUpdateEKSControlPlane", "Initiated update of EKS control plane %s to version %s", s.scope.KubernetesClusterName(), nextVersionString) return true, nil @@ -719,6 +814,26 @@ func (s *Service) reconcileClusterVersion(ctx context.Context, cluster *ekstypes return nil } +func (s *Service) reconcileUpgradePolicy(upgradePolicy *ekstypes.UpgradePolicyResponse) *ekstypes.UpgradePolicyRequest { + // Should not update when cluster upgrade policy is unknown + if upgradePolicy == nil { + return nil + } + + // Cluster stay unchanged when upgrade policy omitted + if s.scope.ControlPlane.Spec.UpgradePolicy == "" { + return nil + } + + if strings.ToLower(string(upgradePolicy.SupportType)) == s.scope.ControlPlane.Spec.UpgradePolicy.String() { + return nil + } + + return &ekstypes.UpgradePolicyRequest{ + SupportType: converters.SupportTypeToSDK(s.scope.ControlPlane.Spec.UpgradePolicy), + } +} + func (s *Service) describeEKSCluster(ctx context.Context, eksClusterName string) (*ekstypes.Cluster, error) { input := &eks.DescribeClusterInput{ Name: aws.String(eksClusterName), @@ -758,7 +873,7 @@ func (s *Service) updateEncryptionConfig(ctx context.Context, updatedEncryptionC return false, err } - conditions.MarkTrue(s.scope.ControlPlane, ekscontrolplanev1.EKSControlPlaneUpdatingCondition) + v1beta1conditions.MarkTrue(s.scope.ControlPlane, ekscontrolplanev1.EKSControlPlaneUpdatingCondition) record.Eventf(s.scope.ControlPlane, "InitiatedUpdateEncryptionConfig", "Initiated update of encryption config in EKS control plane %s", s.scope.KubernetesClusterName()) return true, nil diff --git a/pkg/cloud/services/eks/cluster_test.go b/pkg/cloud/services/eks/cluster_test.go index 7e397f329e..f51ee4c340 100644 --- a/pkg/cloud/services/eks/cluster_test.go +++ b/pkg/cloud/services/eks/cluster_test.go @@ -39,7 +39,7 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/eks/mock_eksiface" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/iamauth/mock_iamauth" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" ) func TestMakeEKSEncryptionConfigs(t *testing.T) { @@ -474,6 +474,123 @@ func TestReconcileClusterVersion(t *testing.T) { } } +func TestReconcileAccessConfig(t *testing.T) { + clusterName := "default.cluster" + tests := []struct { + name string + expect func(m *mock_eksiface.MockEKSAPIMockRecorder) + expectError bool + }{ + { + name: "no upgrade necessary", + expect: func(m *mock_eksiface.MockEKSAPIMockRecorder) { + m. + DescribeCluster(gomock.Eq(context.TODO()), gomock.AssignableToTypeOf(&eks.DescribeClusterInput{})). + Return(&eks.DescribeClusterOutput{ + Cluster: &ekstypes.Cluster{ + Name: aws.String("default.cluster"), + AccessConfig: &ekstypes.AccessConfigResponse{ + AuthenticationMode: ekstypes.AuthenticationModeApiAndConfigMap, + }, + }, + }, nil) + }, + expectError: false, + }, + { + name: "needs upgrade", + expect: func(m *mock_eksiface.MockEKSAPIMockRecorder) { + m. 
+ DescribeCluster(gomock.Eq(context.TODO()), gomock.AssignableToTypeOf(&eks.DescribeClusterInput{})). + Return(&eks.DescribeClusterOutput{ + Cluster: &ekstypes.Cluster{ + Name: aws.String("default.cluster"), + AccessConfig: &ekstypes.AccessConfigResponse{ + AuthenticationMode: ekstypes.AuthenticationModeConfigMap, + }, + }, + }, nil) + m.WaitUntilClusterUpdating( + gomock.Eq(context.TODO()), + gomock.AssignableToTypeOf(&eks.DescribeClusterInput{}), + gomock.Any(), + ).Return(nil) + m. + UpdateClusterConfig(gomock.Eq(context.TODO()), gomock.AssignableToTypeOf(&eks.UpdateClusterConfigInput{})). + Return(&eks.UpdateClusterConfigOutput{}, nil) + }, + expectError: false, + }, + { + name: "api error", + expect: func(m *mock_eksiface.MockEKSAPIMockRecorder) { + m. + DescribeCluster(gomock.Eq(context.TODO()), gomock.AssignableToTypeOf(&eks.DescribeClusterInput{})). + Return(&eks.DescribeClusterOutput{ + Cluster: &ekstypes.Cluster{ + Name: aws.String("default.cluster"), + AccessConfig: &ekstypes.AccessConfigResponse{ + AuthenticationMode: ekstypes.AuthenticationModeApi, + }, + }, + }, nil) + m. + UpdateClusterConfig(gomock.Eq(context.TODO()), gomock.AssignableToTypeOf(&eks.UpdateClusterConfigInput{})). + Return(&eks.UpdateClusterConfigOutput{}, errors.New("Unsupported authentication mode update")) + }, + expectError: true, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + g := NewWithT(t) + + mockControl := gomock.NewController(t) + defer mockControl.Finish() + + eksMock := mock_eksiface.NewMockEKSAPI(mockControl) + + scheme := runtime.NewScheme() + _ = infrav1.AddToScheme(scheme) + _ = ekscontrolplanev1.AddToScheme(scheme) + client := fake.NewClientBuilder().WithScheme(scheme).Build() + scope, err := scope.NewManagedControlPlaneScope(scope.ManagedControlPlaneScopeParams{ + Client: client, + Cluster: &clusterv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "ns", + Name: clusterName, + }, + }, + ControlPlane: &ekscontrolplanev1.AWSManagedControlPlane{ + Spec: ekscontrolplanev1.AWSManagedControlPlaneSpec{ + EKSClusterName: clusterName, + AccessConfig: &ekscontrolplanev1.AccessConfig{ + AuthenticationMode: ekscontrolplanev1.EKSAuthenticationModeAPIAndConfigMap, + }, + }, + }, + }) + g.Expect(err).To(BeNil()) + + tc.expect(eksMock.EXPECT()) + s := NewService(scope) + s.EKSClient = eksMock + + cluster, err := s.describeEKSCluster(context.TODO(), clusterName) + g.Expect(err).To(BeNil()) + + err = s.reconcileAccessConfig(context.TODO(), cluster.AccessConfig) + if tc.expectError { + g.Expect(err).To(HaveOccurred()) + return + } + g.Expect(err).To(BeNil()) + }) + } +} + func TestCreateCluster(t *testing.T) { clusterName := "cluster.default" version := aws.String("1.24") @@ -535,6 +652,7 @@ func TestCreateCluster(t *testing.T) { RoleName: tc.role, NetworkSpec: infrav1.NetworkSpec{Subnets: tc.subnets}, BootstrapSelfManagedAddons: false, + UpgradePolicy: ekscontrolplanev1.UpgradePolicyStandard, }, }, }) @@ -557,6 +675,9 @@ func TestCreateCluster(t *testing.T) { Tags: tc.tags, Version: version, BootstrapSelfManagedAddons: aws.Bool(false), + UpgradePolicy: &ekstypes.UpgradePolicyRequest{ + SupportType: ekstypes.SupportTypeStandard, + }, }).Return(&eks.CreateClusterOutput{}, nil) } s := NewService(scope) @@ -688,6 +809,91 @@ func TestReconcileEKSEncryptionConfig(t *testing.T) { } } +func TestReconcileUpgradePolicy(t *testing.T) { + clusterName := "default.cluster" + tests := []struct { + name string + oldUpgradePolicy *ekstypes.UpgradePolicyResponse + newUpgradePolicy 
ekscontrolplanev1.UpgradePolicy + expect *ekstypes.UpgradePolicyRequest + expectError bool + }{ + { + name: "no update necessary - upgrade policy omitted", + oldUpgradePolicy: &ekstypes.UpgradePolicyResponse{ + SupportType: ekstypes.SupportTypeStandard, + }, + expect: nil, + expectError: false, + }, + { + name: "no update necessary - cannot get cluster upgrade policy", + newUpgradePolicy: ekscontrolplanev1.UpgradePolicyStandard, + expect: nil, + expectError: false, + }, + { + name: "no update necessary - upgrade policy unchanged", + oldUpgradePolicy: &ekstypes.UpgradePolicyResponse{ + SupportType: ekstypes.SupportTypeStandard, + }, + newUpgradePolicy: ekscontrolplanev1.UpgradePolicyStandard, + expect: nil, + expectError: false, + }, + { + name: "needs update", + oldUpgradePolicy: &ekstypes.UpgradePolicyResponse{ + SupportType: ekstypes.SupportTypeStandard, + }, + newUpgradePolicy: ekscontrolplanev1.UpgradePolicyExtended, + expect: &ekstypes.UpgradePolicyRequest{ + SupportType: ekstypes.SupportTypeExtended, + }, + expectError: false, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + g := NewWithT(t) + + mockControl := gomock.NewController(t) + defer mockControl.Finish() + + scheme := runtime.NewScheme() + _ = infrav1.AddToScheme(scheme) + _ = ekscontrolplanev1.AddToScheme(scheme) + client := fake.NewClientBuilder().WithScheme(scheme).Build() + scope, err := scope.NewManagedControlPlaneScope(scope.ManagedControlPlaneScopeParams{ + Client: client, + Cluster: &clusterv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "ns", + Name: clusterName, + }, + }, + ControlPlane: &ekscontrolplanev1.AWSManagedControlPlane{ + Spec: ekscontrolplanev1.AWSManagedControlPlaneSpec{ + Version: aws.String("1.16"), + UpgradePolicy: tc.newUpgradePolicy, + }, + }, + }) + g.Expect(err).To(BeNil()) + + s := NewService(scope) + + upgradePolicyRequest := s.reconcileUpgradePolicy(tc.oldUpgradePolicy) + if tc.expectError { + g.Expect(err).To(HaveOccurred()) + return + } + g.Expect(upgradePolicyRequest).To(Equal(tc.expect)) + }) + } +} + func TestCreateIPv6Cluster(t *testing.T) { g := NewWithT(t) @@ -787,3 +993,74 @@ func TestCreateIPv6Cluster(t *testing.T) { _, err = s.createCluster(context.TODO(), "cluster-name") g.Expect(err).To(BeNil()) } + +func TestCreateClusterWithBootstrapClusterCreatorAdminPermissions(t *testing.T) { + g := NewWithT(t) + + mockControl := gomock.NewController(t) + defer mockControl.Finish() + + eksMock := mock_eksiface.NewMockEKSAPI(mockControl) + iamMock := mock_iamauth.NewMockIAMAPI(mockControl) + + scheme := runtime.NewScheme() + _ = infrav1.AddToScheme(scheme) + _ = ekscontrolplanev1.AddToScheme(scheme) + client := fake.NewClientBuilder().WithScheme(scheme).Build() + + clusterName := "test-cluster" + scope, err := scope.NewManagedControlPlaneScope(scope.ManagedControlPlaneScopeParams{ + Client: client, + Cluster: &clusterv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "ns", + Name: "capi-name", + }, + }, + ControlPlane: &ekscontrolplanev1.AWSManagedControlPlane{ + Spec: ekscontrolplanev1.AWSManagedControlPlaneSpec{ + EKSClusterName: clusterName, + Version: aws.String("1.24"), + RoleName: aws.String("arn:role"), + NetworkSpec: infrav1.NetworkSpec{ + Subnets: []infrav1.SubnetSpec{ + {ID: "1", AvailabilityZone: "us-west-2a"}, + {ID: "2", AvailabilityZone: "us-west-2b"}, + }, + }, + AccessConfig: &ekscontrolplanev1.AccessConfig{ + BootstrapClusterCreatorAdminPermissions: ptr.To(false), + }, + }, + }, + }) + g.Expect(err).To(BeNil()) + + 
eksMock.EXPECT().CreateCluster(context.TODO(), &eks.CreateClusterInput{ + Name: aws.String(clusterName), + Version: aws.String("1.24"), + ResourcesVpcConfig: &ekstypes.VpcConfigRequest{ + SubnetIds: []string{"1", "2"}, + }, + RoleArn: aws.String("arn:role"), + Tags: map[string]string{ + "kubernetes.io/cluster/test-cluster": "owned", + }, + AccessConfig: &ekstypes.CreateAccessConfigRequest{ + BootstrapClusterCreatorAdminPermissions: ptr.To(false), + }, + EncryptionConfig: []ekstypes.EncryptionConfig{}, + BootstrapSelfManagedAddons: aws.Bool(false), + }).Return(&eks.CreateClusterOutput{}, nil) + + iamMock.EXPECT().GetRole(gomock.Any(), gomock.Any()).Return(&iam.GetRoleOutput{ + Role: &iamtypes.Role{Arn: aws.String("arn:role")}, + }, nil) + + s := NewService(scope) + s.EKSClient = eksMock + s.IAMClient = iamMock + + _, err = s.createCluster(context.TODO(), clusterName) + g.Expect(err).To(BeNil()) +} diff --git a/pkg/cloud/services/eks/config.go b/pkg/cloud/services/eks/config.go index 153f293682..0ac413f729 100644 --- a/pkg/cloud/services/eks/config.go +++ b/pkg/cloud/services/eks/config.go @@ -35,7 +35,7 @@ import ( ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/record" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/kubeconfig" "sigs.k8s.io/cluster-api/util/secret" diff --git a/pkg/cloud/services/eks/config_test.go b/pkg/cloud/services/eks/config_test.go index d6f64bd071..c9c3334a14 100644 --- a/pkg/cloud/services/eks/config_test.go +++ b/pkg/cloud/services/eks/config_test.go @@ -20,7 +20,7 @@ import ( ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/sts/mock_stsiface" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/cluster-api/util/secret" ) diff --git a/pkg/cloud/services/eks/eks.go b/pkg/cloud/services/eks/eks.go index 05b760fa44..c91994aa72 100644 --- a/pkg/cloud/services/eks/eks.go +++ b/pkg/cloud/services/eks/eks.go @@ -27,8 +27,8 @@ import ( expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/awserrors" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/record" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - "sigs.k8s.io/cluster-api/util/conditions" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + v1beta1conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" ) // ReconcileControlPlane reconciles an EKS control plane.
@@ -37,31 +37,31 @@ func (s *Service) ReconcileControlPlane(ctx context.Context) error { // Control Plane IAM Role if err := s.reconcileControlPlaneIAMRole(ctx); err != nil { - conditions.MarkFalse(s.scope.ControlPlane, ekscontrolplanev1.IAMControlPlaneRolesReadyCondition, ekscontrolplanev1.IAMControlPlaneRolesReconciliationFailedReason, clusterv1.ConditionSeverityError, "%s", err.Error()) + v1beta1conditions.MarkFalse(s.scope.ControlPlane, ekscontrolplanev1.IAMControlPlaneRolesReadyCondition, ekscontrolplanev1.IAMControlPlaneRolesReconciliationFailedReason, clusterv1beta1.ConditionSeverityError, "%s", err.Error()) return err } - conditions.MarkTrue(s.scope.ControlPlane, ekscontrolplanev1.IAMControlPlaneRolesReadyCondition) + v1beta1conditions.MarkTrue(s.scope.ControlPlane, ekscontrolplanev1.IAMControlPlaneRolesReadyCondition) // EKS Cluster if err := s.reconcileCluster(ctx); err != nil { - conditions.MarkFalse(s.scope.ControlPlane, ekscontrolplanev1.EKSControlPlaneReadyCondition, ekscontrolplanev1.EKSControlPlaneReconciliationFailedReason, clusterv1.ConditionSeverityError, "%s", err.Error()) + v1beta1conditions.MarkFalse(s.scope.ControlPlane, ekscontrolplanev1.EKSControlPlaneReadyCondition, ekscontrolplanev1.EKSControlPlaneReconciliationFailedReason, clusterv1beta1.ConditionSeverityError, "%s", err.Error()) return err } - conditions.MarkTrue(s.scope.ControlPlane, ekscontrolplanev1.EKSControlPlaneReadyCondition) + v1beta1conditions.MarkTrue(s.scope.ControlPlane, ekscontrolplanev1.EKSControlPlaneReadyCondition) // EKS Addons if err := s.reconcileAddons(ctx); err != nil { - conditions.MarkFalse(s.scope.ControlPlane, ekscontrolplanev1.EKSAddonsConfiguredCondition, ekscontrolplanev1.EKSAddonsConfiguredFailedReason, clusterv1.ConditionSeverityError, "%s", err.Error()) + v1beta1conditions.MarkFalse(s.scope.ControlPlane, ekscontrolplanev1.EKSAddonsConfiguredCondition, ekscontrolplanev1.EKSAddonsConfiguredFailedReason, clusterv1beta1.ConditionSeverityError, "%s", err.Error()) return errors.Wrap(err, "failed reconciling eks addons") } - conditions.MarkTrue(s.scope.ControlPlane, ekscontrolplanev1.EKSAddonsConfiguredCondition) + v1beta1conditions.MarkTrue(s.scope.ControlPlane, ekscontrolplanev1.EKSAddonsConfiguredCondition) // EKS Identity Provider if err := s.reconcileIdentityProvider(ctx); err != nil { - conditions.MarkFalse(s.scope.ControlPlane, ekscontrolplanev1.EKSIdentityProviderConfiguredCondition, ekscontrolplanev1.EKSIdentityProviderConfiguredFailedReason, clusterv1.ConditionSeverityWarning, "%s", err.Error()) + v1beta1conditions.MarkFalse(s.scope.ControlPlane, ekscontrolplanev1.EKSIdentityProviderConfiguredCondition, ekscontrolplanev1.EKSIdentityProviderConfiguredFailedReason, clusterv1beta1.ConditionSeverityWarning, "%s", err.Error()) return errors.Wrap(err, "failed reconciling eks identity provider") } - conditions.MarkTrue(s.scope.ControlPlane, ekscontrolplanev1.EKSIdentityProviderConfiguredCondition) + v1beta1conditions.MarkTrue(s.scope.ControlPlane, ekscontrolplanev1.EKSIdentityProviderConfiguredCondition) s.scope.Debug("Reconcile EKS control plane completed successfully") return nil @@ -95,30 +95,30 @@ func (s *NodegroupService) ReconcilePool(ctx context.Context) error { s.scope.Debug("Reconciling EKS nodegroup") if err := s.reconcileNodegroupIAMRole(ctx); err != nil { - conditions.MarkFalse( + v1beta1conditions.MarkFalse( s.scope.ManagedMachinePool, expinfrav1.IAMNodegroupRolesReadyCondition, expinfrav1.IAMNodegroupRolesReconciliationFailedReason, - clusterv1.ConditionSeverityError, 
+ clusterv1beta1.ConditionSeverityError, "%s", err.Error(), ) return err } - conditions.MarkTrue(s.scope.ManagedMachinePool, expinfrav1.IAMNodegroupRolesReadyCondition) + v1beta1conditions.MarkTrue(s.scope.ManagedMachinePool, expinfrav1.IAMNodegroupRolesReadyCondition) if err := s.reconcileNodegroup(ctx); err != nil { - conditions.MarkFalse( + v1beta1conditions.MarkFalse( s.scope.ManagedMachinePool, expinfrav1.EKSNodegroupReadyCondition, expinfrav1.EKSNodegroupReconciliationFailedReason, - clusterv1.ConditionSeverityError, + clusterv1beta1.ConditionSeverityError, "%s", err.Error(), ) return err } - conditions.MarkTrue(s.scope.ManagedMachinePool, expinfrav1.EKSNodegroupReadyCondition) + v1beta1conditions.MarkTrue(s.scope.ManagedMachinePool, expinfrav1.EKSNodegroupReadyCondition) return nil } diff --git a/pkg/cloud/services/eks/fargate.go b/pkg/cloud/services/eks/fargate.go index 06cd5ffac2..d31d5c2110 100644 --- a/pkg/cloud/services/eks/fargate.go +++ b/pkg/cloud/services/eks/fargate.go @@ -32,8 +32,8 @@ import ( expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/awserrors" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/record" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - "sigs.k8s.io/cluster-api/util/conditions" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + v1beta1conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" ) func requeueProfileUpdating() reconcile.Result { @@ -50,11 +50,11 @@ func (s *FargateService) Reconcile(ctx context.Context) (reconcile.Result, error requeue, err := s.reconcileFargateIAMRole(ctx) if err != nil { - conditions.MarkFalse( + v1beta1conditions.MarkFalse( s.scope.FargateProfile, expinfrav1.IAMFargateRolesReadyCondition, expinfrav1.IAMFargateRolesReconciliationFailedReason, - clusterv1.ConditionSeverityError, + clusterv1beta1.ConditionSeverityError, "%s", err.Error(), ) @@ -66,15 +66,15 @@ func (s *FargateService) Reconcile(ctx context.Context) (reconcile.Result, error return requeueRoleUpdating(), nil } - conditions.MarkTrue(s.scope.FargateProfile, expinfrav1.IAMFargateRolesReadyCondition) + v1beta1conditions.MarkTrue(s.scope.FargateProfile, expinfrav1.IAMFargateRolesReadyCondition) requeue, err = s.reconcileFargateProfile(ctx) if err != nil { - conditions.MarkFalse( + v1beta1conditions.MarkFalse( s.scope.FargateProfile, - clusterv1.ReadyCondition, + clusterv1beta1.ReadyCondition, expinfrav1.EKSFargateReconciliationFailedReason, - clusterv1.ConditionSeverityError, + clusterv1beta1.ConditionSeverityError, "%s", err.Error(), ) @@ -124,34 +124,34 @@ func (s *FargateService) handleStatus(profile *ekstypes.FargateProfile) (requeue switch profile.Status { case ekstypes.FargateProfileStatusCreating: s.scope.FargateProfile.Status.Ready = false - if conditions.IsTrue(s.scope.FargateProfile, expinfrav1.EKSFargateDeletingCondition) { - conditions.MarkFalse(s.scope.FargateProfile, expinfrav1.EKSFargateDeletingCondition, expinfrav1.EKSFargateCreatingReason, clusterv1.ConditionSeverityInfo, "") + if v1beta1conditions.IsTrue(s.scope.FargateProfile, expinfrav1.EKSFargateDeletingCondition) { + v1beta1conditions.MarkFalse(s.scope.FargateProfile, expinfrav1.EKSFargateDeletingCondition, expinfrav1.EKSFargateCreatingReason, clusterv1beta1.ConditionSeverityInfo, "") } - if !conditions.IsTrue(s.scope.FargateProfile, expinfrav1.EKSFargateCreatingCondition) { + if !v1beta1conditions.IsTrue(s.scope.FargateProfile, expinfrav1.EKSFargateCreatingCondition) { 
record.Eventf(s.scope.FargateProfile, "InitiatedCreateEKSFargateProfile", "Started creating EKS fargate profile %s", s.scope.FargateProfile.Spec.ProfileName) - conditions.MarkTrue(s.scope.FargateProfile, expinfrav1.EKSFargateCreatingCondition) + v1beta1conditions.MarkTrue(s.scope.FargateProfile, expinfrav1.EKSFargateCreatingCondition) } - conditions.MarkFalse(s.scope.FargateProfile, expinfrav1.EKSFargateProfileReadyCondition, expinfrav1.EKSFargateCreatingReason, clusterv1.ConditionSeverityInfo, "") + v1beta1conditions.MarkFalse(s.scope.FargateProfile, expinfrav1.EKSFargateProfileReadyCondition, expinfrav1.EKSFargateCreatingReason, clusterv1beta1.ConditionSeverityInfo, "") case ekstypes.FargateProfileStatusCreateFailed, ekstypes.FargateProfileStatusDeleteFailed: s.scope.FargateProfile.Status.Ready = false s.scope.FargateProfile.Status.FailureMessage = aws.String(fmt.Sprintf("unexpected profile status: %s", string(profile.Status))) reason := expinfrav1.EKSFargateFailedReason s.scope.FargateProfile.Status.FailureReason = &reason - conditions.MarkFalse(s.scope.FargateProfile, expinfrav1.EKSFargateProfileReadyCondition, expinfrav1.EKSFargateFailedReason, clusterv1.ConditionSeverityError, "") + v1beta1conditions.MarkFalse(s.scope.FargateProfile, expinfrav1.EKSFargateProfileReadyCondition, expinfrav1.EKSFargateFailedReason, clusterv1beta1.ConditionSeverityError, "") case ekstypes.FargateProfileStatusActive: s.scope.FargateProfile.Status.Ready = true - if conditions.IsTrue(s.scope.FargateProfile, expinfrav1.EKSFargateCreatingCondition) { + if v1beta1conditions.IsTrue(s.scope.FargateProfile, expinfrav1.EKSFargateCreatingCondition) { record.Eventf(s.scope.FargateProfile, "SuccessfulCreateEKSFargateProfile", "Created new EKS fargate profile %s", s.scope.FargateProfile.Spec.ProfileName) - conditions.MarkFalse(s.scope.FargateProfile, expinfrav1.EKSFargateCreatingCondition, expinfrav1.EKSFargateCreatedReason, clusterv1.ConditionSeverityInfo, "") + v1beta1conditions.MarkFalse(s.scope.FargateProfile, expinfrav1.EKSFargateCreatingCondition, expinfrav1.EKSFargateCreatedReason, clusterv1beta1.ConditionSeverityInfo, "") } - conditions.MarkTrue(s.scope.FargateProfile, expinfrav1.EKSFargateProfileReadyCondition) + v1beta1conditions.MarkTrue(s.scope.FargateProfile, expinfrav1.EKSFargateProfileReadyCondition) case ekstypes.FargateProfileStatusDeleting: s.scope.FargateProfile.Status.Ready = false - if !conditions.IsTrue(s.scope.FargateProfile, expinfrav1.EKSFargateDeletingCondition) { + if !v1beta1conditions.IsTrue(s.scope.FargateProfile, expinfrav1.EKSFargateDeletingCondition) { record.Eventf(s.scope.FargateProfile, "InitiatedDeleteEKSFargateProfile", "Started deleting EKS fargate profile %s", s.scope.FargateProfile.Spec.ProfileName) - conditions.MarkTrue(s.scope.FargateProfile, expinfrav1.EKSFargateDeletingCondition) + v1beta1conditions.MarkTrue(s.scope.FargateProfile, expinfrav1.EKSFargateDeletingCondition) } - conditions.MarkFalse(s.scope.FargateProfile, expinfrav1.EKSFargateProfileReadyCondition, expinfrav1.EKSFargateDeletingReason, clusterv1.ConditionSeverityInfo, "") + v1beta1conditions.MarkFalse(s.scope.FargateProfile, expinfrav1.EKSFargateProfileReadyCondition, expinfrav1.EKSFargateDeletingReason, clusterv1beta1.ConditionSeverityInfo, "") } switch profile.Status { case ekstypes.FargateProfileStatusCreating, ekstypes.FargateProfileStatusDeleting: @@ -167,11 +167,11 @@ func (s *FargateService) ReconcileDelete(ctx context.Context) (reconcile.Result, requeue, err := s.deleteFargateProfile(ctx) if err != nil { 
- conditions.MarkFalse( + v1beta1conditions.MarkFalse( s.scope.FargateProfile, - clusterv1.ReadyCondition, + clusterv1beta1.ReadyCondition, expinfrav1.EKSFargateReconciliationFailedReason, - clusterv1.ConditionSeverityError, + clusterv1beta1.ConditionSeverityError, "%s", err.Error(), ) @@ -184,11 +184,11 @@ func (s *FargateService) ReconcileDelete(ctx context.Context) (reconcile.Result, err = s.deleteFargateIAMRole(ctx) if err != nil { - conditions.MarkFalse( + v1beta1conditions.MarkFalse( s.scope.FargateProfile, expinfrav1.IAMFargateRolesReadyCondition, expinfrav1.IAMFargateRolesReconciliationFailedReason, - clusterv1.ConditionSeverityError, + clusterv1beta1.ConditionSeverityError, "%s", err.Error(), ) @@ -272,11 +272,11 @@ func (s *FargateService) deleteFargateProfile(ctx context.Context) (requeue bool return false, errors.Wrap(err, "failed to describe profile") } if profile == nil { - if conditions.IsTrue(s.scope.FargateProfile, expinfrav1.EKSFargateDeletingCondition) { + if v1beta1conditions.IsTrue(s.scope.FargateProfile, expinfrav1.EKSFargateDeletingCondition) { record.Eventf(s.scope.FargateProfile, "SuccessfulDeleteEKSFargateProfile", "Deleted EKS fargate profile %s", s.scope.FargateProfile.Spec.ProfileName) - conditions.MarkFalse(s.scope.FargateProfile, expinfrav1.EKSFargateDeletingCondition, expinfrav1.EKSFargateDeletedReason, clusterv1.ConditionSeverityInfo, "") + v1beta1conditions.MarkFalse(s.scope.FargateProfile, expinfrav1.EKSFargateDeletingCondition, expinfrav1.EKSFargateDeletedReason, clusterv1beta1.ConditionSeverityInfo, "") } - conditions.MarkFalse(s.scope.FargateProfile, expinfrav1.EKSFargateProfileReadyCondition, expinfrav1.EKSFargateDeletedReason, clusterv1.ConditionSeverityInfo, "") + v1beta1conditions.MarkFalse(s.scope.FargateProfile, expinfrav1.EKSFargateProfileReadyCondition, expinfrav1.EKSFargateDeletedReason, clusterv1beta1.ConditionSeverityInfo, "") return false, nil } diff --git a/pkg/cloud/services/eks/nodegroup.go b/pkg/cloud/services/eks/nodegroup.go index eb1430ffe6..e89c97b16a 100644 --- a/pkg/cloud/services/eks/nodegroup.go +++ b/pkg/cloud/services/eks/nodegroup.go @@ -28,6 +28,7 @@ import ( ekstypes "github.com/aws/aws-sdk-go-v2/service/eks/types" iamtypes "github.com/aws/aws-sdk-go-v2/service/iam/types" "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" "github.com/pkg/errors" "k8s.io/apimachinery/pkg/util/version" @@ -38,7 +39,7 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/converters" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/wait" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/record" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" "sigs.k8s.io/cluster-api/util/annotations" ) @@ -119,6 +120,11 @@ func (s *NodegroupService) updateConfig() (*ekstypes.NodegroupUpdateConfig, erro return converters.NodegroupUpdateconfigToSDK(updateConfig) } +func (s *NodegroupService) nodeRepairConfig() *ekstypes.NodeRepairConfig { + repairConfig := s.scope.ManagedMachinePool.Spec.NodeRepairConfig + return converters.NodeRepairConfigToSDK(repairConfig) +} + func (s *NodegroupService) roleArn(ctx context.Context) (*string, error) { var role *iamtypes.Role if s.scope.RoleName() != "" { @@ -249,6 +255,9 @@ func (s *NodegroupService) createNodegroup(ctx context.Context) (*ekstypes.Nodeg Version: s.scope.ManagedMachinePool.Status.LaunchTemplateVersion, } } + if managedPool.NodeRepairConfig != nil { + input.NodeRepairConfig = s.nodeRepairConfig() + } out, err := 
s.EKSClient.CreateNodegroup(ctx, input) if err != nil { @@ -266,7 +275,7 @@ func (s *NodegroupService) createNodegroup(ctx context.Context) (*ekstypes.Nodeg func (s *NodegroupService) deleteNodegroupAndWait(ctx context.Context) (reterr error) { eksClusterName := s.scope.KubernetesClusterName() nodegroupName := s.scope.NodegroupName() - if err := s.scope.NodegroupReadyFalse(clusterv1.DeletingReason, ""); err != nil { + if err := s.scope.NodegroupReadyFalse(clusterv1beta1.DeletingReason, ""); err != nil { return err } defer func() { @@ -277,7 +286,7 @@ func (s *NodegroupService) deleteNodegroupAndWait(ctx context.Context) (reterr e if err := s.scope.NodegroupReadyFalse("DeletingFailed", reterr.Error()); err != nil { reterr = err } - } else if err := s.scope.NodegroupReadyFalse(clusterv1.DeletedReason, ""); err != nil { + } else if err := s.scope.NodegroupReadyFalse(clusterv1beta1.DeletedReason, ""); err != nil { reterr = err } }() @@ -480,6 +489,14 @@ func (s *NodegroupService) reconcileNodegroupConfig(ctx context.Context, ng *eks input.UpdateConfig = updatedConfig needsUpdate = true } + + specRepairConfig := s.nodeRepairConfig() + if !cmp.Equal(ng.NodeRepairConfig, specRepairConfig, cmpopts.IgnoreUnexported(ekstypes.NodeRepairConfig{})) { + s.Debug("Nodegroup repair configuration differs from spec, updating the nodegroup repair config", "nodegroup", ng.NodegroupName) + input.NodeRepairConfig = specRepairConfig + needsUpdate = true + } + if !needsUpdate { s.Debug("node group config update not needed", "cluster", eksClusterName, "name", *ng.NodegroupName) return nil diff --git a/pkg/cloud/services/eks/oidc_test.go b/pkg/cloud/services/eks/oidc_test.go index 8a57b330e1..f54b33c67e 100644 --- a/pkg/cloud/services/eks/oidc_test.go +++ b/pkg/cloud/services/eks/oidc_test.go @@ -43,7 +43,7 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/iamauth/mock_iamauth" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/internal/testcert" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" ) func TestOIDCReconcile(t *testing.T) { diff --git a/pkg/cloud/services/eks/roles.go b/pkg/cloud/services/eks/roles.go index 6d813ef416..c122c2768c 100644 --- a/pkg/cloud/services/eks/roles.go +++ b/pkg/cloud/services/eks/roles.go @@ -17,13 +17,13 @@ limitations under the License. 
package eks import ( + "context" "fmt" "strings" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/smithy-go" "github.com/pkg/errors" - "golang.org/x/net/context" "sigs.k8s.io/cluster-api-provider-aws/v2/cmd/clusterawsadm/api/bootstrap/v1beta1" ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2" @@ -31,7 +31,7 @@ import ( eksiam "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/eks/iam" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/eks" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/record" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) const ( @@ -244,7 +244,7 @@ func (s *NodegroupService) reconcileNodegroupIAMRole(ctx context.Context) error } func (s *NodegroupService) deleteNodegroupIAMRole(ctx context.Context) (reterr error) { - if err := s.scope.IAMReadyFalse(clusterv1.DeletingReason, ""); err != nil { + if err := s.scope.IAMReadyFalse(clusterv1beta1.DeletingReason, ""); err != nil { return err } defer func() { @@ -255,7 +255,7 @@ func (s *NodegroupService) deleteNodegroupIAMRole(ctx context.Context) (reterr e if err := s.scope.IAMReadyFalse("DeletingFailed", reterr.Error()); err != nil { reterr = err } - } else if err := s.scope.IAMReadyFalse(clusterv1.DeletedReason, ""); err != nil { + } else if err := s.scope.IAMReadyFalse(clusterv1beta1.DeletedReason, ""); err != nil { reterr = err } }() @@ -356,7 +356,7 @@ func (s *FargateService) reconcileFargateIAMRole(ctx context.Context) (requeue b } func (s *FargateService) deleteFargateIAMRole(ctx context.Context) (reterr error) { - if err := s.scope.IAMReadyFalse(clusterv1.DeletingReason, ""); err != nil { + if err := s.scope.IAMReadyFalse(clusterv1beta1.DeletingReason, ""); err != nil { return err } defer func() { @@ -367,7 +367,7 @@ func (s *FargateService) deleteFargateIAMRole(ctx context.Context) (reterr error if err := s.scope.IAMReadyFalse("DeletingFailed", reterr.Error()); err != nil { reterr = err } - } else if err := s.scope.IAMReadyFalse(clusterv1.DeletedReason, ""); err != nil { + } else if err := s.scope.IAMReadyFalse(clusterv1beta1.DeletedReason, ""); err != nil { reterr = err } }() diff --git a/pkg/cloud/services/elb/loadbalancer.go b/pkg/cloud/services/elb/loadbalancer.go index 874ea2d815..63c7cd3be1 100644 --- a/pkg/cloud/services/elb/loadbalancer.go +++ b/pkg/cloud/services/elb/loadbalancer.go @@ -46,8 +46,8 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/wait" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/hash" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/record" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - "sigs.k8s.io/cluster-api/util/conditions" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + v1beta1conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" ) // ResourceGroups are filtered by ARN identifier: https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#arns-syntax @@ -682,7 +682,7 @@ func (s *Service) deleteAPIServerELB(ctx context.Context) error { return errors.Wrap(err, "failed to get control plane load balancer name") } - conditions.MarkFalse(s.scope.InfraCluster(), infrav1.LoadBalancerReadyCondition, clusterv1.DeletingReason, clusterv1.ConditionSeverityInfo, "") + v1beta1conditions.MarkFalse(s.scope.InfraCluster(), infrav1.LoadBalancerReadyCondition, clusterv1beta1.DeletingReason, clusterv1beta1.ConditionSeverityInfo, "") if err := s.scope.PatchObject(); err != nil { return err } @@ -690,7 +690,7 @@ func (s *Service) 
deleteAPIServerELB(ctx context.Context) error { apiELB, err := s.describeClassicELB(ctx, elbName) if IsNotFound(err) { s.scope.Debug("Control plane load balancer not found, skipping deletion") - conditions.MarkFalse(s.scope.InfraCluster(), infrav1.LoadBalancerReadyCondition, clusterv1.DeletedReason, clusterv1.ConditionSeverityInfo, "") + v1beta1conditions.MarkFalse(s.scope.InfraCluster(), infrav1.LoadBalancerReadyCondition, clusterv1beta1.DeletedReason, clusterv1beta1.ConditionSeverityInfo, "") return nil } if err != nil { @@ -699,13 +699,13 @@ func (s *Service) deleteAPIServerELB(ctx context.Context) error { if apiELB.IsUnmanaged(s.scope.Name()) { s.scope.Debug("Found unmanaged classic load balancer for apiserver, skipping deletion", "api-server-elb-name", apiELB.Name) - conditions.MarkFalse(s.scope.InfraCluster(), infrav1.LoadBalancerReadyCondition, clusterv1.DeletedReason, clusterv1.ConditionSeverityInfo, "") + v1beta1conditions.MarkFalse(s.scope.InfraCluster(), infrav1.LoadBalancerReadyCondition, clusterv1beta1.DeletedReason, clusterv1beta1.ConditionSeverityInfo, "") return nil } s.scope.Debug("deleting load balancer", "name", elbName) if err := s.deleteClassicELB(ctx, elbName); err != nil { - conditions.MarkFalse(s.scope.InfraCluster(), infrav1.LoadBalancerReadyCondition, "DeletingFailed", clusterv1.ConditionSeverityWarning, "%s", err.Error()) + v1beta1conditions.MarkFalse(s.scope.InfraCluster(), infrav1.LoadBalancerReadyCondition, "DeletingFailed", clusterv1beta1.ConditionSeverityWarning, "%s", err.Error()) return err } @@ -717,7 +717,7 @@ func (s *Service) deleteAPIServerELB(ctx context.Context) error { return errors.Wrapf(err, "failed to wait for %q load balancer deletion", s.scope.Name()) } - conditions.MarkFalse(s.scope.InfraCluster(), infrav1.LoadBalancerReadyCondition, clusterv1.DeletedReason, clusterv1.ConditionSeverityInfo, "") + v1beta1conditions.MarkFalse(s.scope.InfraCluster(), infrav1.LoadBalancerReadyCondition, clusterv1beta1.DeletedReason, clusterv1beta1.ConditionSeverityInfo, "") s.scope.Info("Deleted control plane load balancer", "name", elbName) return nil } @@ -792,7 +792,7 @@ func (s *Service) deleteExistingNLB(ctx context.Context, lbSpec *infrav1.AWSLoad if err != nil { return errors.Wrap(err, "failed to get control plane load balancer name") } - conditions.MarkFalse(s.scope.InfraCluster(), infrav1.LoadBalancerReadyCondition, clusterv1.DeletingReason, clusterv1.ConditionSeverityInfo, "") + v1beta1conditions.MarkFalse(s.scope.InfraCluster(), infrav1.LoadBalancerReadyCondition, clusterv1beta1.DeletingReason, clusterv1beta1.ConditionSeverityInfo, "") if err := s.scope.PatchObject(); err != nil { return err } @@ -811,7 +811,7 @@ func (s *Service) deleteExistingNLB(ctx context.Context, lbSpec *infrav1.AWSLoad } s.scope.Debug("deleting load balancer", "name", name) if err := s.deleteLB(ctx, lb.ARN); err != nil { - conditions.MarkFalse(s.scope.InfraCluster(), infrav1.LoadBalancerReadyCondition, "DeletingFailed", clusterv1.ConditionSeverityWarning, "%s", err.Error()) + v1beta1conditions.MarkFalse(s.scope.InfraCluster(), infrav1.LoadBalancerReadyCondition, "DeletingFailed", clusterv1beta1.ConditionSeverityWarning, "%s", err.Error()) return err } @@ -823,7 +823,7 @@ func (s *Service) deleteExistingNLB(ctx context.Context, lbSpec *infrav1.AWSLoad return errors.Wrapf(err, "failed to wait for %q load balancer deletion", s.scope.Name()) } - conditions.MarkFalse(s.scope.InfraCluster(), infrav1.LoadBalancerReadyCondition, clusterv1.DeletedReason, clusterv1.ConditionSeverityInfo, 
"") + v1beta1conditions.MarkFalse(s.scope.InfraCluster(), infrav1.LoadBalancerReadyCondition, clusterv1beta1.DeletedReason, clusterv1beta1.ConditionSeverityInfo, "") s.scope.Info("Deleted control plane load balancer", "name", name) return nil @@ -1422,7 +1422,6 @@ func (s *Service) listByTag(ctx context.Context, tag string) ([]string, error) { names = append(names, name) } }) - if err != nil { record.Eventf(s.scope.InfraCluster(), "FailedListELBsByTag", "Failed to list %s ELB by Tags: %v", s.scope.Name(), err) return nil, errors.Wrapf(err, "failed to list %s ELBs by tag group", s.scope.Name()) diff --git a/pkg/cloud/services/elb/loadbalancer_test.go b/pkg/cloud/services/elb/loadbalancer_test.go index d59c15c91b..7746a658ab 100644 --- a/pkg/cloud/services/elb/loadbalancer_test.go +++ b/pkg/cloud/services/elb/loadbalancer_test.go @@ -46,8 +46,9 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope" "sigs.k8s.io/cluster-api-provider-aws/v2/test/helpers" "sigs.k8s.io/cluster-api-provider-aws/v2/test/mocks" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - "sigs.k8s.io/cluster-api/util/conditions" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" + v1beta1conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" ) var stubInfraV1TargetGroupSpecAPI = infrav1.TargetGroupSpec{ @@ -512,7 +513,7 @@ func TestRegisterInstanceWithAPIServerELB(t *testing.T) { Name: aws.String(elbName), }, NetworkSpec: infrav1.NetworkSpec{ - Subnets: infrav1.Subnets{{ + Subnets: infrav1.Subnets{infrav1.SubnetSpec{ ID: clusterSubnetID, AvailabilityZone: az, }}, @@ -577,7 +578,7 @@ func TestRegisterInstanceWithAPIServerELB(t *testing.T) { ObjectMeta: metav1.ObjectMeta{Name: clusterName}, Spec: infrav1.AWSClusterSpec{ NetworkSpec: infrav1.NetworkSpec{ - Subnets: infrav1.Subnets{{ + Subnets: infrav1.Subnets{infrav1.SubnetSpec{ ID: clusterSubnetID, AvailabilityZone: az, }}, @@ -660,7 +661,7 @@ func TestRegisterInstanceWithAPIServerELB(t *testing.T) { ObjectMeta: metav1.ObjectMeta{Name: clusterName}, Spec: infrav1.AWSClusterSpec{ NetworkSpec: infrav1.NetworkSpec{ - Subnets: infrav1.Subnets{{ + Subnets: infrav1.Subnets{infrav1.SubnetSpec{ ID: clusterSubnetID, AvailabilityZone: az, }}, @@ -816,7 +817,7 @@ func TestRegisterInstanceWithAPIServerNLB(t *testing.T) { LoadBalancerType: infrav1.LoadBalancerTypeNLB, }, NetworkSpec: infrav1.NetworkSpec{ - Subnets: infrav1.Subnets{{ + Subnets: infrav1.Subnets{infrav1.SubnetSpec{ ID: clusterSubnetID, AvailabilityZone: az, }}, @@ -919,7 +920,7 @@ func TestRegisterInstanceWithAPIServerNLB(t *testing.T) { }, }, NetworkSpec: infrav1.NetworkSpec{ - Subnets: infrav1.Subnets{{ + Subnets: infrav1.Subnets{infrav1.SubnetSpec{ ID: clusterSubnetID, AvailabilityZone: az, }}, @@ -1048,7 +1049,7 @@ func TestRegisterInstanceWithAPIServerNLB(t *testing.T) { ObjectMeta: metav1.ObjectMeta{Name: clusterName}, Spec: infrav1.AWSClusterSpec{ NetworkSpec: infrav1.NetworkSpec{ - Subnets: infrav1.Subnets{{ + Subnets: infrav1.Subnets{infrav1.SubnetSpec{ ID: clusterSubnetID, AvailabilityZone: az, }}, @@ -2964,12 +2965,12 @@ func TestDeleteAPIServerELB(t *testing.T) { }).Return(nil, &elbtypes.AccessPointNotFoundException{}) }, verifyAWSCluster: func(awsCluster *infrav1.AWSCluster) { - loadBalancerConditionReady := conditions.IsTrue(awsCluster, infrav1.LoadBalancerReadyCondition) + loadBalancerConditionReady := v1beta1conditions.IsTrue(awsCluster, infrav1.LoadBalancerReadyCondition) if loadBalancerConditionReady { 
t.Fatalf("Expected LoadBalancerReady condition to be False, but was True") } - loadBalancerConditionReason := conditions.GetReason(awsCluster, infrav1.LoadBalancerReadyCondition) - if loadBalancerConditionReason != clusterv1.DeletedReason { + loadBalancerConditionReason := v1beta1conditions.GetReason(awsCluster, infrav1.LoadBalancerReadyCondition) + if loadBalancerConditionReason != clusterv1beta1.DeletedReason { t.Fatalf("Expected LoadBalancerReady condition reason to be Deleted, but was %s", loadBalancerConditionReason) } }, @@ -3013,12 +3014,12 @@ func TestDeleteAPIServerELB(t *testing.T) { ) }, verifyAWSCluster: func(awsCluster *infrav1.AWSCluster) { - loadBalancerConditionReady := conditions.IsTrue(awsCluster, infrav1.LoadBalancerReadyCondition) + loadBalancerConditionReady := v1beta1conditions.IsTrue(awsCluster, infrav1.LoadBalancerReadyCondition) if loadBalancerConditionReady { t.Fatalf("Expected LoadBalancerReady condition to be False, but was True") } - loadBalancerConditionReason := conditions.GetReason(awsCluster, infrav1.LoadBalancerReadyCondition) - if loadBalancerConditionReason != clusterv1.DeletedReason { + loadBalancerConditionReason := v1beta1conditions.GetReason(awsCluster, infrav1.LoadBalancerReadyCondition) + if loadBalancerConditionReason != clusterv1beta1.DeletedReason { t.Fatalf("Expected LoadBalancerReady condition reason to be Deleted, but was %s", loadBalancerConditionReason) } }, @@ -3075,12 +3076,12 @@ func TestDeleteAPIServerELB(t *testing.T) { ) }, verifyAWSCluster: func(awsCluster *infrav1.AWSCluster) { - loadBalancerConditionReady := conditions.IsTrue(awsCluster, infrav1.LoadBalancerReadyCondition) + loadBalancerConditionReady := v1beta1conditions.IsTrue(awsCluster, infrav1.LoadBalancerReadyCondition) if loadBalancerConditionReady { t.Fatalf("Expected LoadBalancerReady condition to be False, but was True") } - loadBalancerConditionReason := conditions.GetReason(awsCluster, infrav1.LoadBalancerReadyCondition) - if loadBalancerConditionReason != clusterv1.DeletedReason { + loadBalancerConditionReason := v1beta1conditions.GetReason(awsCluster, infrav1.LoadBalancerReadyCondition) + if loadBalancerConditionReason != clusterv1beta1.DeletedReason { t.Fatalf("Expected LoadBalancerReady condition reason to be Deleted, but was %s", loadBalancerConditionReason) } }, diff --git a/pkg/cloud/services/gc/cleanup_test.go b/pkg/cloud/services/gc/cleanup_test.go index 363ed94364..31e9c8cbc2 100644 --- a/pkg/cloud/services/gc/cleanup_test.go +++ b/pkg/cloud/services/gc/cleanup_test.go @@ -39,7 +39,7 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope" "sigs.k8s.io/cluster-api-provider-aws/v2/test/mocks" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" ) func TestReconcileDelete(t *testing.T) { @@ -938,11 +938,10 @@ func createEKSCluster() *clusterv1.Cluster { Namespace: "default", }, Spec: clusterv1.ClusterSpec{ - InfrastructureRef: &corev1.ObjectReference{ - Kind: "AWSManagedControlPlane", - APIVersion: ekscontrolplanev1.GroupVersion.String(), - Name: "cp1", - Namespace: "default", + InfrastructureRef: clusterv1.ContractVersionedObjectReference{ + Kind: "AWSManagedControlPlane", + APIGroup: ekscontrolplanev1.GroupVersion.Group, + Name: "cp1", }, }, } @@ -1021,11 +1020,10 @@ func createUnmanagedCluster() *clusterv1.Cluster { Namespace: "default", }, Spec: clusterv1.ClusterSpec{ - InfrastructureRef: &corev1.ObjectReference{ - Kind: "AWSCluster", - 
APIVersion: infrav1.GroupVersion.String(), - Name: "cluster1", - Namespace: "default", + InfrastructureRef: clusterv1.ContractVersionedObjectReference{ + Kind: "AWSCluster", + APIGroup: infrav1.GroupVersion.Group, + Name: "cluster1", }, }, } } diff --git a/pkg/cloud/services/iamauth/reconcile.go b/pkg/cloud/services/iamauth/reconcile.go index c3a6407940..e01b574a2a 100644 --- a/pkg/cloud/services/iamauth/reconcile.go +++ b/pkg/cloud/services/iamauth/reconcile.go @@ -23,15 +23,13 @@ import ( "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/iam" "github.com/pkg/errors" - corev1 "k8s.io/api/core/v1" "k8s.io/klog/v2" "sigs.k8s.io/controller-runtime/pkg/client" infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2" expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - expclusterv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" ) // ReconcileIAMAuthenticator is used to create the aws-iam-authenticator in a cluster. @@ -141,7 +139,7 @@ func (s *Service) getRolesForMachineDeployments(ctx context.Context, allRoles ma Namespace: s.scope.Namespace(), }, awsMachineTemplate) if err != nil { - return fmt.Errorf("failed to get AWSMachine %s/%s: %w", ref.Namespace, ref.Name, err) + return fmt.Errorf("failed to get AWSMachineTemplate %s/%s: %w", s.scope.Namespace(), ref.Name, err) } instanceProfile := awsMachineTemplate.Spec.Template.Spec.IAMInstanceProfile if _, ok := allRoles[instanceProfile]; !ok && instanceProfile != "" { @@ -152,7 +150,7 @@ } func (s *Service) getRolesForMachinePools(ctx context.Context, allRoles map[string]struct{}) error { - machinePoolList := &expclusterv1.MachinePoolList{} + machinePoolList := &clusterv1.MachinePoolList{} selectors := []client.ListOption{ client.InNamespace(s.scope.Namespace()), client.MatchingLabels{ @@ -180,14 +178,14 @@ return nil } -func (s *Service) getRolesForAWSMachinePool(ctx context.Context, ref corev1.ObjectReference, allRoles map[string]struct{}) error { +func (s *Service) getRolesForAWSMachinePool(ctx context.Context, ref clusterv1.ContractVersionedObjectReference, allRoles map[string]struct{}) error { awsMachinePool := &expinfrav1.AWSMachinePool{} err := s.client.Get(ctx, client.ObjectKey{ Name: ref.Name, Namespace: s.scope.Namespace(), }, awsMachinePool) if err != nil { - return fmt.Errorf("failed to get AWSMachine %s/%s: %w", ref.Namespace, ref.Name, err) + return fmt.Errorf("failed to get AWSMachinePool %s/%s: %w", s.scope.Namespace(), ref.Name, err) } instanceProfile := awsMachinePool.Spec.AWSLaunchTemplate.IamInstanceProfile if _, ok := allRoles[instanceProfile]; !ok && instanceProfile != "" { @@ -196,14 +194,14 @@ return nil } -func (s *Service) getRolesForAWSManagedMachinePool(ctx context.Context, ref corev1.ObjectReference, allRoles map[string]struct{}) error { +func (s *Service) getRolesForAWSManagedMachinePool(ctx context.Context, ref clusterv1.ContractVersionedObjectReference, allRoles map[string]struct{}) error { awsManagedMachinePool := &expinfrav1.AWSManagedMachinePool{} err := s.client.Get(ctx, client.ObjectKey{ Name: ref.Name, Namespace: s.scope.Namespace(), },
awsManagedMachinePool) if err != nil { - return fmt.Errorf("failed to get AWSMachine %s/%s: %w", ref.Namespace, ref.Name, err) + return fmt.Errorf("failed to get AWSMachine %s/%s: %w", s.scope.Namespace(), ref.Name, err) } instanceProfile := awsManagedMachinePool.Spec.RoleName if _, ok := allRoles[instanceProfile]; !ok && instanceProfile != "" { diff --git a/pkg/cloud/services/iamauth/reconcile_test.go b/pkg/cloud/services/iamauth/reconcile_test.go index 91b1d4b9a0..3c239f5b16 100644 --- a/pkg/cloud/services/iamauth/reconcile_test.go +++ b/pkg/cloud/services/iamauth/reconcile_test.go @@ -23,7 +23,6 @@ import ( "github.com/golang/mock/gomock" . "github.com/onsi/gomega" - corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/utils/ptr" @@ -31,8 +30,7 @@ import ( ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2" expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - expclusterv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/cluster-api/util" ) @@ -60,25 +58,33 @@ func TestReconcileIAMAuth(t *testing.T) { eksCluster := createEKSCluster(name, ns) g.Expect(testEnv.Create(ctx, eksCluster)).To(Succeed()) awsMP := createAWSMachinePoolForClusterWithInstanceProfile(name, ns, eksCluster.Name, "nodes.cluster-api-provider-aws.sigs.k8s.io") - infraRef := corev1.ObjectReference{ - Kind: awsMP.TypeMeta.Kind, - Name: awsMP.Name, - Namespace: awsMP.Namespace, - APIVersion: awsMP.TypeMeta.APIVersion, + infraRef := clusterv1.ContractVersionedObjectReference{ + Kind: awsMP.TypeMeta.Kind, + Name: awsMP.Name, + APIGroup: awsMP.TypeMeta.GroupVersionKind().Group, + } + configRef := clusterv1.ContractVersionedObjectReference{ + Kind: "EKSConfig", + Name: awsMP.Name, + APIGroup: awsMP.TypeMeta.GroupVersionKind().Group, } g.Expect(testEnv.Create(ctx, awsMP)).To(Succeed()) - mp := createMachinepoolForCluster(name, ns, eksCluster.Name, infraRef) + mp := createMachinepoolForCluster(name, ns, eksCluster.Name, infraRef, configRef) g.Expect(testEnv.Create(ctx, mp)).To(Succeed()) awsMachineTemplate := createAWSMachineTemplateForClusterWithInstanceProfile(name, ns, eksCluster.Name, "eks-nodes.cluster-api-provider-aws.sigs.k8s.io") - infraRefForMD := corev1.ObjectReference{ - Kind: awsMachineTemplate.TypeMeta.Kind, - Name: awsMachineTemplate.Name, - Namespace: awsMachineTemplate.Namespace, - APIVersion: awsMachineTemplate.TypeMeta.APIVersion, + infraRefForMD := clusterv1.ContractVersionedObjectReference{ + Kind: awsMachineTemplate.TypeMeta.Kind, + Name: awsMachineTemplate.Name, + APIGroup: awsMachineTemplate.TypeMeta.GroupVersionKind().Group, + } + configRefForMD := clusterv1.ContractVersionedObjectReference{ + Kind: "EKSConfig", + Name: awsMachineTemplate.Name, + APIGroup: awsMachineTemplate.TypeMeta.GroupVersionKind().Group, } g.Expect(testEnv.Create(ctx, awsMachineTemplate)).To(Succeed()) - md := createMachineDeploymentForCluster(name, ns, eksCluster.Name, infraRefForMD) + md := createMachineDeploymentForCluster(name, ns, eksCluster.Name, infraRefForMD, configRefForMD) g.Expect(testEnv.Create(ctx, md)).To(Succeed()) expectedRoles := map[string]struct{}{ @@ -127,7 +133,8 @@ func createEKSCluster(name, namespace string) *ekscontrolplanev1.AWSManagedContr func createAWSMachinePoolForClusterWithInstanceProfile(name, namespace, clusterName, instanceProfile string) 
*expinfrav1.AWSMachinePool { awsMP := &expinfrav1.AWSMachinePool{ TypeMeta: metav1.TypeMeta{ - Kind: "AWSMachinePool", + Kind: "AWSMachinePool", + APIVersion: "infrastructure.cluster.x-k8s.io/v1beta2", }, ObjectMeta: metav1.ObjectMeta{ Name: name, @@ -146,8 +153,8 @@ func createAWSMachinePoolForClusterWithInstanceProfile(name, namespace, clusterN return awsMP } -func createMachinepoolForCluster(name, namespace, clusterName string, infrastructureRef corev1.ObjectReference) *expclusterv1.MachinePool { - mp := &expclusterv1.MachinePool{ +func createMachinepoolForCluster(name, namespace, clusterName string, infrastructureRef, configRef clusterv1.ContractVersionedObjectReference) *clusterv1.MachinePool { + mp := &clusterv1.MachinePool{ ObjectMeta: metav1.ObjectMeta{ Name: name, Namespace: namespace, @@ -155,12 +162,15 @@ func createMachinepoolForCluster(name, namespace, clusterName string, infrastruc clusterv1.ClusterNameLabel: clusterName, }, }, - Spec: expclusterv1.MachinePoolSpec{ + Spec: clusterv1.MachinePoolSpec{ ClusterName: clusterName, Template: clusterv1.MachineTemplateSpec{ Spec: clusterv1.MachineSpec{ ClusterName: clusterName, InfrastructureRef: infrastructureRef, + Bootstrap: clusterv1.Bootstrap{ + ConfigRef: configRef, + }, }, }, }, @@ -171,7 +181,8 @@ func createMachinepoolForCluster(name, namespace, clusterName string, infrastruc func createAWSMachineTemplateForClusterWithInstanceProfile(name, namespace, clusterName, instanceProfile string) *infrav1.AWSMachineTemplate { mt := &infrav1.AWSMachineTemplate{ TypeMeta: metav1.TypeMeta{ - Kind: "AWSMachineTemplate", + Kind: "AWSMachineTemplate", + APIVersion: "bootstrap.cluster.x-k8s.io/v1beta2", }, ObjectMeta: metav1.ObjectMeta{ Name: name, @@ -192,7 +203,7 @@ func createAWSMachineTemplateForClusterWithInstanceProfile(name, namespace, clus return mt } -func createMachineDeploymentForCluster(name, namespace, clusterName string, infrastructureRef corev1.ObjectReference) *clusterv1.MachineDeployment { +func createMachineDeploymentForCluster(name, namespace, clusterName string, infrastructureRef, configRefForMD clusterv1.ContractVersionedObjectReference) *clusterv1.MachineDeployment { md := &clusterv1.MachineDeployment{ ObjectMeta: metav1.ObjectMeta{ Name: name, @@ -203,10 +214,16 @@ func createMachineDeploymentForCluster(name, namespace, clusterName string, infr }, Spec: clusterv1.MachineDeploymentSpec{ ClusterName: clusterName, + Selector: metav1.LabelSelector{ + MatchLabels: map[string]string{"app": "test-app"}, + }, Template: clusterv1.MachineTemplateSpec{ Spec: clusterv1.MachineSpec{ ClusterName: clusterName, InfrastructureRef: infrastructureRef, + Bootstrap: clusterv1.Bootstrap{ + ConfigRef: configRefForMD, + }, }, }, Replicas: ptr.To[int32](2), diff --git a/pkg/cloud/services/iamauth/suite_test.go b/pkg/cloud/services/iamauth/suite_test.go index d94ce1bfaf..48064de119 100644 --- a/pkg/cloud/services/iamauth/suite_test.go +++ b/pkg/cloud/services/iamauth/suite_test.go @@ -29,8 +29,7 @@ import ( ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2" expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/test/helpers" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - expclusterv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" ) var ( @@ -48,9 +47,7 @@ func setup() { utilruntime.Must(infrav1.AddToScheme(scheme.Scheme)) utilruntime.Must(ekscontrolplanev1.AddToScheme(scheme.Scheme)) 
utilruntime.Must(expinfrav1.AddToScheme(scheme.Scheme)) - utilruntime.Must(expclusterv1.AddToScheme(scheme.Scheme)) utilruntime.Must(clusterv1.AddToScheme(scheme.Scheme)) - utilruntime.Must(expclusterv1.AddToScheme(scheme.Scheme)) testEnvConfig := helpers.NewTestEnvironmentConfiguration([]string{ path.Join("config", "crd", "bases"), diff --git a/pkg/cloud/services/instancestate/helpers_test.go b/pkg/cloud/services/instancestate/helpers_test.go index 5e004e08f5..53f9f922f9 100644 --- a/pkg/cloud/services/instancestate/helpers_test.go +++ b/pkg/cloud/services/instancestate/helpers_test.go @@ -23,7 +23,7 @@ import ( infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" ) func setupCluster(clusterName string) (*scope.ClusterScope, error) { diff --git a/pkg/cloud/services/interfaces.go b/pkg/cloud/services/interfaces.go index 65b08a2ecd..5b4eccb30e 100644 --- a/pkg/cloud/services/interfaces.go +++ b/pkg/cloud/services/interfaces.go @@ -20,8 +20,10 @@ package services import ( "context" + autoscalingtypes "github.com/aws/aws-sdk-go-v2/service/autoscaling/types" ec2types "github.com/aws/aws-sdk-go-v2/service/ec2/types" apimachinerytypes "k8s.io/apimachinery/pkg/types" + ctrl "sigs.k8s.io/controller-runtime" infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2" @@ -44,8 +46,9 @@ type ASGInterface interface { GetASGByName(scope *scope.MachinePoolScope) (*expinfrav1.AutoScalingGroup, error) CreateASG(scope *scope.MachinePoolScope) (*expinfrav1.AutoScalingGroup, error) UpdateASG(scope *scope.MachinePoolScope) error + CancelASGInstanceRefresh(scope *scope.MachinePoolScope) error StartASGInstanceRefresh(scope *scope.MachinePoolScope) error - CanStartASGInstanceRefresh(scope *scope.MachinePoolScope) (bool, error) + CanStartASGInstanceRefresh(scope *scope.MachinePoolScope) (bool, *autoscalingtypes.InstanceRefreshStatus, error) UpdateResourceTags(resourceID *string, create, remove map[string]string) error DeleteASGAndWait(id string) error SuspendProcesses(name string, processes []string) error @@ -91,13 +94,18 @@ type EC2Interface interface { // ReleaseElasticIP reconciles the elastic IP from a custom Public IPv4 Pool. ReleaseElasticIP(instanceID string) error + + // Dedicated Host management + AllocateDedicatedHost(ctx context.Context, spec *infrav1.DynamicHostAllocationSpec, instanceType, availabilityZone string, scope *scope.MachineScope) (string, error) + ReleaseDedicatedHost(ctx context.Context, hostID string) error + DescribeDedicatedHost(ctx context.Context, hostID string) (*infrav1.DedicatedHostInfo, error) } // MachinePoolReconcileInterface encapsulates high-level reconciliation functions regarding EC2 reconciliation. It is // separate from EC2Interface so that we can mock AWS requests separately. For example, by not mocking the // ReconcileLaunchTemplate function, but mocking EC2Interface, we can test which EC2 API operations would have been called. 
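
Note on the hunk below: ReconcileLaunchTemplate's signature widens so instance-refresh state can be threaded through the reconcile loop. canUpdateLaunchTemplate now also returns the current *autoscalingtypes.InstanceRefreshStatus, a cancelInstanceRefresh hook is added, and the method returns a *ctrl.Result so the caller can requeue. A minimal caller sketch under those signatures, wiring the hooks from the ASGInterface methods above; the function and variable names are hypothetical, not taken from this patch:

    import (
        "context"

        autoscalingtypes "github.com/aws/aws-sdk-go-v2/service/autoscaling/types"
        ctrl "sigs.k8s.io/controller-runtime"

        "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope"
        "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services"
    )

    // reconcileLaunchTemplateSketch (hypothetical) shows how a machine-pool
    // reconciler might call the revised ReconcileLaunchTemplate.
    func reconcileLaunchTemplateSketch(
        ctx context.Context,
        recon services.MachinePoolReconcileInterface,
        asgSvc services.ASGInterface,
        ec2Svc services.EC2Interface,
        objSvc services.ObjectStoreInterface,
        mpScope *scope.MachinePoolScope,
        ignScope scope.IgnitionScope,
        ltScope scope.LaunchTemplateScope,
        s3Scope scope.S3Scope,
    ) (ctrl.Result, error) {
        res, err := recon.ReconcileLaunchTemplate(ctx, ignScope, ltScope, s3Scope, ec2Svc, objSvc,
            // canUpdateLaunchTemplate: the returned status lets the caller
            // distinguish "a refresh is in flight" from "free to update".
            func() (bool, *autoscalingtypes.InstanceRefreshStatus, error) {
                return asgSvc.CanStartASGInstanceRefresh(mpScope)
            },
            // cancelInstanceRefresh: abort an in-flight refresh so an updated
            // launch template can roll out without waiting for it.
            func() error { return asgSvc.CancelASGInstanceRefresh(mpScope) },
            // runPostLaunchTemplateUpdateOperation: kick off a new refresh.
            func() error { return asgSvc.StartASGInstanceRefresh(mpScope) },
        )
        if err != nil {
            return ctrl.Result{}, err
        }
        if res != nil {
            return *res, nil // a non-nil result asks the controller to requeue
        }
        return ctrl.Result{}, nil
    }

A nil result presumably means no requeue is needed, while a concrete result can carry a RequeueAfter while a refresh drains.
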
type MachinePoolReconcileInterface interface { - ReconcileLaunchTemplate(ctx context.Context, ignitionScope scope.IgnitionScope, scope scope.LaunchTemplateScope, s3Scope scope.S3Scope, ec2svc EC2Interface, objectStoreSvc ObjectStoreInterface, canUpdateLaunchTemplate func() (bool, error), runPostLaunchTemplateUpdateOperation func() error) error + ReconcileLaunchTemplate(ctx context.Context, ignitionScope scope.IgnitionScope, scope scope.LaunchTemplateScope, s3Scope scope.S3Scope, ec2svc EC2Interface, objectStoreSvc ObjectStoreInterface, canUpdateLaunchTemplate func() (bool, *autoscalingtypes.InstanceRefreshStatus, error), cancelInstanceRefresh func() error, runPostLaunchTemplateUpdateOperation func() error) (*ctrl.Result, error) ReconcileTags(scope scope.LaunchTemplateScope, resourceServicesToUpdate []scope.ResourceServiceToUpdate) error } diff --git a/pkg/cloud/services/mock_services/autoscaling_interface_mock.go b/pkg/cloud/services/mock_services/autoscaling_interface_mock.go index fcee553a07..7381f6b913 100644 --- a/pkg/cloud/services/mock_services/autoscaling_interface_mock.go +++ b/pkg/cloud/services/mock_services/autoscaling_interface_mock.go @@ -24,6 +24,7 @@ import ( context "context" reflect "reflect" + types "github.com/aws/aws-sdk-go-v2/service/autoscaling/types" gomock "github.com/golang/mock/gomock" v1beta2 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2" scope "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope" @@ -68,12 +69,13 @@ func (mr *MockASGInterfaceMockRecorder) ASGIfExists(arg0 interface{}) *gomock.Ca } // CanStartASGInstanceRefresh mocks base method. -func (m *MockASGInterface) CanStartASGInstanceRefresh(arg0 *scope.MachinePoolScope) (bool, error) { +func (m *MockASGInterface) CanStartASGInstanceRefresh(arg0 *scope.MachinePoolScope) (bool, *types.InstanceRefreshStatus, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "CanStartASGInstanceRefresh", arg0) ret0, _ := ret[0].(bool) - ret1, _ := ret[1].(error) - return ret0, ret1 + ret1, _ := ret[1].(*types.InstanceRefreshStatus) + ret2, _ := ret[2].(error) + return ret0, ret1, ret2 } // CanStartASGInstanceRefresh indicates an expected call of CanStartASGInstanceRefresh. @@ -82,6 +84,20 @@ func (mr *MockASGInterfaceMockRecorder) CanStartASGInstanceRefresh(arg0 interfac return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CanStartASGInstanceRefresh", reflect.TypeOf((*MockASGInterface)(nil).CanStartASGInstanceRefresh), arg0) } +// CancelASGInstanceRefresh mocks base method. +func (m *MockASGInterface) CancelASGInstanceRefresh(arg0 *scope.MachinePoolScope) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CancelASGInstanceRefresh", arg0) + ret0, _ := ret[0].(error) + return ret0 +} + +// CancelASGInstanceRefresh indicates an expected call of CancelASGInstanceRefresh. +func (mr *MockASGInterfaceMockRecorder) CancelASGInstanceRefresh(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CancelASGInstanceRefresh", reflect.TypeOf((*MockASGInterface)(nil).CancelASGInstanceRefresh), arg0) +} + // CreateASG mocks base method. 
func (m *MockASGInterface) CreateASG(arg0 *scope.MachinePoolScope) (*v1beta2.AutoScalingGroup, error) { m.ctrl.T.Helper() diff --git a/pkg/cloud/services/mock_services/ec2_interface_mock.go b/pkg/cloud/services/mock_services/ec2_interface_mock.go index 78b52d93df..6b923ac502 100644 --- a/pkg/cloud/services/mock_services/ec2_interface_mock.go +++ b/pkg/cloud/services/mock_services/ec2_interface_mock.go @@ -55,6 +55,21 @@ func (m *MockEC2Interface) EXPECT() *MockEC2InterfaceMockRecorder { return m.recorder } +// AllocateDedicatedHost mocks base method. +func (m *MockEC2Interface) AllocateDedicatedHost(arg0 context.Context, arg1 *v1beta2.DynamicHostAllocationSpec, arg2, arg3 string, arg4 *scope.MachineScope) (string, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "AllocateDedicatedHost", arg0, arg1, arg2, arg3, arg4) + ret0, _ := ret[0].(string) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// AllocateDedicatedHost indicates an expected call of AllocateDedicatedHost. +func (mr *MockEC2InterfaceMockRecorder) AllocateDedicatedHost(arg0, arg1, arg2, arg3, arg4 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AllocateDedicatedHost", reflect.TypeOf((*MockEC2Interface)(nil).AllocateDedicatedHost), arg0, arg1, arg2, arg3, arg4) +} + // CreateInstance mocks base method. func (m *MockEC2Interface) CreateInstance(arg0 context.Context, arg1 *scope.MachineScope, arg2 []byte, arg3 string) (*v1beta2.Instance, error) { m.ctrl.T.Helper() @@ -127,6 +142,21 @@ func (mr *MockEC2InterfaceMockRecorder) DeleteLaunchTemplate(arg0 interface{}) * return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteLaunchTemplate", reflect.TypeOf((*MockEC2Interface)(nil).DeleteLaunchTemplate), arg0) } +// DescribeDedicatedHost mocks base method. +func (m *MockEC2Interface) DescribeDedicatedHost(arg0 context.Context, arg1 string) (*v1beta2.DedicatedHostInfo, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DescribeDedicatedHost", arg0, arg1) + ret0, _ := ret[0].(*v1beta2.DedicatedHostInfo) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DescribeDedicatedHost indicates an expected call of DescribeDedicatedHost. +func (mr *MockEC2InterfaceMockRecorder) DescribeDedicatedHost(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DescribeDedicatedHost", reflect.TypeOf((*MockEC2Interface)(nil).DescribeDedicatedHost), arg0, arg1) +} + // DetachSecurityGroupsFromNetworkInterface mocks base method. func (m *MockEC2Interface) DetachSecurityGroupsFromNetworkInterface(arg0 []string, arg1 string) error { m.ctrl.T.Helper() @@ -352,6 +382,20 @@ func (mr *MockEC2InterfaceMockRecorder) ReconcileElasticIPFromPublicPool(arg0, a return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReconcileElasticIPFromPublicPool", reflect.TypeOf((*MockEC2Interface)(nil).ReconcileElasticIPFromPublicPool), arg0, arg1) } +// ReleaseDedicatedHost mocks base method. +func (m *MockEC2Interface) ReleaseDedicatedHost(arg0 context.Context, arg1 string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ReleaseDedicatedHost", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// ReleaseDedicatedHost indicates an expected call of ReleaseDedicatedHost. 
+func (mr *MockEC2InterfaceMockRecorder) ReleaseDedicatedHost(arg0, arg1 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReleaseDedicatedHost", reflect.TypeOf((*MockEC2Interface)(nil).ReleaseDedicatedHost), arg0, arg1) +} + // ReleaseElasticIP mocks base method. func (m *MockEC2Interface) ReleaseElasticIP(arg0 string) error { m.ctrl.T.Helper() diff --git a/pkg/cloud/services/mock_services/reconcile_interface_mock.go b/pkg/cloud/services/mock_services/reconcile_interface_mock.go index f3fd1f490d..d6d393eca8 100644 --- a/pkg/cloud/services/mock_services/reconcile_interface_mock.go +++ b/pkg/cloud/services/mock_services/reconcile_interface_mock.go @@ -24,9 +24,11 @@ import ( context "context" reflect "reflect" + types "github.com/aws/aws-sdk-go-v2/service/autoscaling/types" gomock "github.com/golang/mock/gomock" scope "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope" services "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services" + reconcile "sigs.k8s.io/controller-runtime/pkg/reconcile" ) // MockMachinePoolReconcileInterface is a mock of MachinePoolReconcileInterface interface. @@ -53,17 +55,18 @@ func (m *MockMachinePoolReconcileInterface) EXPECT() *MockMachinePoolReconcileIn } // ReconcileLaunchTemplate mocks base method. -func (m *MockMachinePoolReconcileInterface) ReconcileLaunchTemplate(arg0 context.Context, arg1 scope.IgnitionScope, arg2 scope.LaunchTemplateScope, arg3 scope.S3Scope, arg4 services.EC2Interface, arg5 services.ObjectStoreInterface, arg6 func() (bool, error), arg7 func() error) error { +func (m *MockMachinePoolReconcileInterface) ReconcileLaunchTemplate(arg0 context.Context, arg1 scope.IgnitionScope, arg2 scope.LaunchTemplateScope, arg3 scope.S3Scope, arg4 services.EC2Interface, arg5 services.ObjectStoreInterface, arg6 func() (bool, *types.InstanceRefreshStatus, error), arg7, arg8 func() error) (*reconcile.Result, error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ReconcileLaunchTemplate", arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7) - ret0, _ := ret[0].(error) - return ret0 + ret := m.ctrl.Call(m, "ReconcileLaunchTemplate", arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8) + ret0, _ := ret[0].(*reconcile.Result) + ret1, _ := ret[1].(error) + return ret0, ret1 } // ReconcileLaunchTemplate indicates an expected call of ReconcileLaunchTemplate. -func (mr *MockMachinePoolReconcileInterfaceMockRecorder) ReconcileLaunchTemplate(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7 interface{}) *gomock.Call { +func (mr *MockMachinePoolReconcileInterfaceMockRecorder) ReconcileLaunchTemplate(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8 interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReconcileLaunchTemplate", reflect.TypeOf((*MockMachinePoolReconcileInterface)(nil).ReconcileLaunchTemplate), arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReconcileLaunchTemplate", reflect.TypeOf((*MockMachinePoolReconcileInterface)(nil).ReconcileLaunchTemplate), arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8) } // ReconcileTags mocks base method. 
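
The mock_services changes above are mechanical gomock regenerations tracking the new signatures: the three-value CanStartASGInstanceRefresh, the added CancelASGInstanceRefresh and dedicated-host methods, and the nine-argument ReconcileLaunchTemplate. A test could stub the refresh hooks as below; a sketch assuming the standard generated NewMockASGInterface constructor and an existing *gomock.Controller named mockCtrl:

    asgSvc := mock_services.NewMockASGInterface(mockCtrl)
    // No refresh in flight: allow the update and return a nil status.
    asgSvc.EXPECT().CanStartASGInstanceRefresh(gomock.Any()).Return(true, nil, nil)
    // A pending launch-template change may first cancel an old refresh.
    asgSvc.EXPECT().CancelASGInstanceRefresh(gomock.Any()).Return(nil).AnyTimes()
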
diff --git a/pkg/cloud/services/network/carriergateways.go b/pkg/cloud/services/network/carriergateways.go index af89c43bc4..fb7c56954e 100644 --- a/pkg/cloud/services/network/carriergateways.go +++ b/pkg/cloud/services/network/carriergateways.go @@ -33,7 +33,7 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/wait" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/tags" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/record" - "sigs.k8s.io/cluster-api/util/conditions" + v1beta1conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" ) func (s *Service) reconcileCarrierGateway() error { @@ -78,7 +78,7 @@ func (s *Service) reconcileCarrierGateway() error { record.Warnf(s.scope.InfraCluster(), "FailedTagCarrierGateway", "Failed to tag managed Carrier Gateway %q: %v", cagw.CarrierGatewayId, err) return errors.Wrapf(err, "failed to tag carrier gateway %q", *cagw.CarrierGatewayId) } - conditions.MarkTrue(s.scope.InfraCluster(), infrav1.CarrierGatewayReadyCondition) + v1beta1conditions.MarkTrue(s.scope.InfraCluster(), infrav1.CarrierGatewayReadyCondition) return nil } diff --git a/pkg/cloud/services/network/carriergateways_test.go b/pkg/cloud/services/network/carriergateways_test.go index c23a873c02..722446eff9 100644 --- a/pkg/cloud/services/network/carriergateways_test.go +++ b/pkg/cloud/services/network/carriergateways_test.go @@ -33,7 +33,7 @@ import ( infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope" "sigs.k8s.io/cluster-api-provider-aws/v2/test/mocks" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" ) func TestReconcileCarrierGateway(t *testing.T) { diff --git a/pkg/cloud/services/network/egress_only_gateways.go b/pkg/cloud/services/network/egress_only_gateways.go index e710adecf7..865ea04fd5 100644 --- a/pkg/cloud/services/network/egress_only_gateways.go +++ b/pkg/cloud/services/network/egress_only_gateways.go @@ -33,7 +33,7 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/wait" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/tags" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/record" - "sigs.k8s.io/cluster-api/util/conditions" + v1beta1conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" ) func (s *Service) reconcileEgressOnlyInternetGateways() error { @@ -79,7 +79,7 @@ func (s *Service) reconcileEgressOnlyInternetGateways() error { record.Warnf(s.scope.InfraCluster(), "FailedTagEgressOnlyInternetGateway", "Failed to tag managed Egress Only Internet Gateway %q: %v", gateway.EgressOnlyInternetGatewayId, err) return errors.Wrapf(err, "failed to tag egress only internet gateway %q", *gateway.EgressOnlyInternetGatewayId) } - conditions.MarkTrue(s.scope.InfraCluster(), infrav1.EgressOnlyInternetGatewayReadyCondition) + v1beta1conditions.MarkTrue(s.scope.InfraCluster(), infrav1.EgressOnlyInternetGatewayReadyCondition) return nil } @@ -136,9 +136,12 @@ func (s *Service) createEgressOnlyInternetGateway() (*types.EgressOnlyInternetGa } func (s *Service) describeEgressOnlyVpcInternetGateways() ([]types.EgressOnlyInternetGateway, error) { + // The API for DescribeEgressOnlyInternetGateways does not support filtering by VPC ID attachment. + // More details: https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeEgressOnlyInternetGateways.html + // Since the eigw is managed by CAPA, we can filter by the kubernetes cluster tag. 
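
In short, discovery switches from the attachment.vpc-id filter to the cluster ownership tag, after which the results are post-filtered by VPC attachment and a conflict error is raised if more than one gateway survives (both visible in the hunks below). Judging by the updated expectation in TestDeleteEgressOnlyInternetGateways further down, the cluster filter expands to a tag-key match; roughly, assuming the aws, ec2 types, and infrav1 imports this file already uses:

    // Approximately what filter.EC2.Cluster("test-cluster") produces, per the
    // DescribeEgressOnlyInternetGatewaysInput asserted in the deletion test.
    clusterFilter := types.Filter{
        Name:   aws.String("tag-key"),
        Values: []string{infrav1.ClusterTagKey("test-cluster")},
    }
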
out, err := s.EC2Client.DescribeEgressOnlyInternetGateways(context.TODO(), &ec2.DescribeEgressOnlyInternetGatewaysInput{ Filters: []types.Filter{ - filter.EC2.VPCAttachment(s.scope.VPC().ID), + filter.EC2.Cluster(s.scope.Name()), }, }) if err != nil { @@ -146,11 +149,28 @@ func (s *Service) describeEgressOnlyVpcInternetGateways() ([]types.EgressOnlyInt return nil, errors.Wrapf(err, "failed to describe egress only internet gateways in vpc %q", s.scope.VPC().ID) } - if len(out.EgressOnlyInternetGateways) == 0 { + // For safeguarding, we collect only egress-only internet gateways + // that are attached to the VPC. + eigws := make([]types.EgressOnlyInternetGateway, 0) + for _, eigw := range out.EgressOnlyInternetGateways { + for _, attachment := range eigw.Attachments { + if aws.ToString(attachment.VpcId) == s.scope.VPC().ID { + eigws = append(eigws, eigw) + } + } + } + + if len(eigws) == 0 { return nil, awserrors.NewNotFound(fmt.Sprintf("no egress only internet gateways found in vpc %q", s.scope.VPC().ID)) + } else if len(eigws) > 1 { + eigwIDs := make([]string, len(eigws)) + for i, eigw := range eigws { + eigwIDs[i] = aws.ToString(eigw.EgressOnlyInternetGatewayId) + } + return nil, awserrors.NewConflict(fmt.Sprintf("expected 1 egress only internet gateway in vpc %q, but found %v: %v", s.scope.VPC().ID, len(eigws), eigwIDs)) } - return out.EgressOnlyInternetGateways, nil + return eigws, nil } func (s *Service) getEgressOnlyGatewayTagParams(id string) infrav1.BuildParams { diff --git a/pkg/cloud/services/network/egress_only_gateways_test.go b/pkg/cloud/services/network/egress_only_gateways_test.go index fbd859ab80..ee3f57a47d 100644 --- a/pkg/cloud/services/network/egress_only_gateways_test.go +++ b/pkg/cloud/services/network/egress_only_gateways_test.go @@ -32,7 +32,7 @@ import ( infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope" "sigs.k8s.io/cluster-api-provider-aws/v2/test/mocks" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" ) func TestReconcileEgressOnlyInternetGateways(t *testing.T) { @@ -40,9 +40,10 @@ func TestReconcileEgressOnlyInternetGateways(t *testing.T) { defer mockCtrl.Finish() testCases := []struct { - name string - input *infrav1.NetworkSpec - expect func(m *mocks.MockEC2APIMockRecorder) + name string + input *infrav1.NetworkSpec + expect func(m *mocks.MockEC2APIMockRecorder) + wantErrContaining *string }{ { name: "has eigw", @@ -75,6 +76,44 @@ func TestReconcileEgressOnlyInternetGateways(t *testing.T) { Return(nil, nil) }, }, + { + name: "has more than 1 eigw, should return error", + input: &infrav1.NetworkSpec{ + VPC: infrav1.VPCSpec{ + ID: "vpc-egress-only-gateways", + IPv6: &infrav1.IPv6{}, + Tags: infrav1.Tags{ + infrav1.ClusterTagKey("test-cluster"): "owned", + }, + }, + }, + wantErrContaining: aws.String("expected 1 egress only internet gateway in vpc \"vpc-egress-only-gateways\", but found 2: [eigw-0 eigw-1]"), + expect: func(m *mocks.MockEC2APIMockRecorder) { + m.DescribeEgressOnlyInternetGateways(context.TODO(), gomock.AssignableToTypeOf(&ec2.DescribeEgressOnlyInternetGatewaysInput{})). 
+ Return(&ec2.DescribeEgressOnlyInternetGatewaysOutput{ + EgressOnlyInternetGateways: []types.EgressOnlyInternetGateway{ + { + EgressOnlyInternetGatewayId: aws.String("eigw-0"), + Attachments: []types.InternetGatewayAttachment{ + { + State: types.AttachmentStatusAttached, + VpcId: aws.String("vpc-egress-only-gateways"), + }, + }, + }, + { + EgressOnlyInternetGatewayId: aws.String("eigw-1"), + Attachments: []types.InternetGatewayAttachment{ + { + State: types.AttachmentStatusAttached, + VpcId: aws.String("vpc-egress-only-gateways"), + }, + }, + }, + }, + }, nil) + }, + }, { name: "no eigw attached, creates one", input: &infrav1.NetworkSpec{ @@ -122,10 +161,13 @@ func TestReconcileEgressOnlyInternetGateways(t *testing.T) { for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { + g := NewWithT(t) ec2Mock := mocks.NewMockEC2API(mockCtrl) scheme := runtime.NewScheme() - _ = infrav1.AddToScheme(scheme) + err := infrav1.AddToScheme(scheme) + g.Expect(err).NotTo(HaveOccurred()) + client := fake.NewClientBuilder().WithScheme(scheme).Build() scope, err := scope.NewClusterScope(scope.ClusterScopeParams{ Client: client, @@ -139,18 +181,20 @@ func TestReconcileEgressOnlyInternetGateways(t *testing.T) { }, }, }) - if err != nil { - t.Fatalf("Failed to create test context: %v", err) - } + g.Expect(err).NotTo(HaveOccurred()) tc.expect(ec2Mock.EXPECT()) s := NewService(scope) s.EC2Client = ec2Mock - if err := s.reconcileEgressOnlyInternetGateways(); err != nil { - t.Fatalf("got an unexpected error: %v", err) + err = s.reconcileEgressOnlyInternetGateways() + if tc.wantErrContaining != nil { + g.Expect(err).To(HaveOccurred()) + g.Expect(err.Error()).To(ContainSubstring(*tc.wantErrContaining)) + return } + g.Expect(err).NotTo(HaveOccurred()) }) } } @@ -199,8 +243,8 @@ func TestDeleteEgressOnlyInternetGateways(t *testing.T) { m.DescribeEgressOnlyInternetGateways(context.TODO(), gomock.Eq(&ec2.DescribeEgressOnlyInternetGatewaysInput{ Filters: []types.Filter{ { - Name: aws.String("attachment.vpc-id"), - Values: []string{"vpc-gateways"}, + Name: aws.String("tag-key"), + Values: []string{infrav1.ClusterTagKey("test-cluster")}, }, }, })).Return(&ec2.DescribeEgressOnlyInternetGatewaysOutput{}, nil) diff --git a/pkg/cloud/services/network/eips_test.go b/pkg/cloud/services/network/eips_test.go index 53dbc23dd2..7992367fd4 100644 --- a/pkg/cloud/services/network/eips_test.go +++ b/pkg/cloud/services/network/eips_test.go @@ -33,7 +33,7 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/awserrors" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope" "sigs.k8s.io/cluster-api-provider-aws/v2/test/mocks" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" ) func TestServiceReleaseAddresses(t *testing.T) { diff --git a/pkg/cloud/services/network/gateways.go b/pkg/cloud/services/network/gateways.go index ee9fa65692..91bea14e5e 100644 --- a/pkg/cloud/services/network/gateways.go +++ b/pkg/cloud/services/network/gateways.go @@ -33,7 +33,7 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/wait" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/tags" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/record" - "sigs.k8s.io/cluster-api/util/conditions" + v1beta1conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" ) func (s *Service) reconcileInternetGateways() error { @@ -74,7 +74,7 @@ func (s *Service) reconcileInternetGateways() error { record.Warnf(s.scope.InfraCluster(), "FailedTagInternetGateway", "Failed to 
tag managed Internet Gateway %q: %v", gateway.InternetGatewayId, err) return errors.Wrapf(err, "failed to tag internet gateway %q", *gateway.InternetGatewayId) } - conditions.MarkTrue(s.scope.InfraCluster(), infrav1.InternetGatewayReadyCondition) + v1beta1conditions.MarkTrue(s.scope.InfraCluster(), infrav1.InternetGatewayReadyCondition) return nil } diff --git a/pkg/cloud/services/network/gateways_test.go b/pkg/cloud/services/network/gateways_test.go index 62d35e3b69..d1b0f7dfd0 100644 --- a/pkg/cloud/services/network/gateways_test.go +++ b/pkg/cloud/services/network/gateways_test.go @@ -32,7 +32,7 @@ import ( infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope" "sigs.k8s.io/cluster-api-provider-aws/v2/test/mocks" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" ) func TestReconcileInternetGateways(t *testing.T) { diff --git a/pkg/cloud/services/network/natgateways.go b/pkg/cloud/services/network/natgateways.go index 6bde5f5a64..78c041637d 100644 --- a/pkg/cloud/services/network/natgateways.go +++ b/pkg/cloud/services/network/natgateways.go @@ -36,8 +36,8 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/wait" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/tags" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/record" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - "sigs.k8s.io/cluster-api/util/conditions" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + v1beta1conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" ) func (s *Service) reconcileNatGateways() error { @@ -54,20 +54,20 @@ func (s *Service) reconcileNatGateways() error { if len(s.scope.Subnets().FilterPrivate().FilterNonCni()) == 0 { s.scope.Debug("No private subnets available, skipping NAT gateways") - conditions.MarkFalse( + v1beta1conditions.MarkFalse( s.scope.InfraCluster(), infrav1.NatGatewaysReadyCondition, infrav1.NatGatewaysReconciliationFailedReason, - clusterv1.ConditionSeverityWarning, + clusterv1beta1.ConditionSeverityWarning, "No private subnets available, skipping NAT gateways") return nil } else if len(s.scope.Subnets().FilterPublic().FilterNonCni()) == 0 { s.scope.Debug("No public subnets available. Cannot create NAT gateways for private subnets, this might be a configuration error.") - conditions.MarkFalse( + v1beta1conditions.MarkFalse( s.scope.InfraCluster(), infrav1.NatGatewaysReadyCondition, infrav1.NatGatewaysReconciliationFailedReason, - clusterv1.ConditionSeverityWarning, + clusterv1beta1.ConditionSeverityWarning, "No public subnets available. 
Cannot create NAT gateways for private subnets, this might be a configuration error.") return nil } @@ -80,8 +80,8 @@ func (s *Service) reconcileNatGateways() error { // Batch the creation of NAT gateways if len(subnetIDs) > 0 { // set NatGatewayCreationStarted if the condition has never been set before - if !conditions.Has(s.scope.InfraCluster(), infrav1.NatGatewaysReadyCondition) { - conditions.MarkFalse(s.scope.InfraCluster(), infrav1.NatGatewaysReadyCondition, infrav1.NatGatewaysCreationStartedReason, clusterv1.ConditionSeverityInfo, "") + if !v1beta1conditions.Has(s.scope.InfraCluster(), infrav1.NatGatewaysReadyCondition) { + v1beta1conditions.MarkFalse(s.scope.InfraCluster(), infrav1.NatGatewaysReadyCondition, infrav1.NatGatewaysCreationStartedReason, clusterv1beta1.ConditionSeverityInfo, "") if err := s.scope.PatchObject(); err != nil { return errors.Wrap(err, "failed to patch conditions") } @@ -100,7 +100,7 @@ func (s *Service) reconcileNatGateways() error { if err != nil { return err } - conditions.MarkTrue(s.scope.InfraCluster(), infrav1.NatGatewaysReadyCondition) + v1beta1conditions.MarkTrue(s.scope.InfraCluster(), infrav1.NatGatewaysReadyCondition) } return nil diff --git a/pkg/cloud/services/network/natgateways_test.go b/pkg/cloud/services/network/natgateways_test.go index 79277625b5..7c5eee7e39 100644 --- a/pkg/cloud/services/network/natgateways_test.go +++ b/pkg/cloud/services/network/natgateways_test.go @@ -34,7 +34,7 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/awserrors" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope" "sigs.k8s.io/cluster-api-provider-aws/v2/test/mocks" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" ) const ( diff --git a/pkg/cloud/services/network/network.go b/pkg/cloud/services/network/network.go index 35aa421be7..cf7941a0a0 100644 --- a/pkg/cloud/services/network/network.go +++ b/pkg/cloud/services/network/network.go @@ -22,8 +22,8 @@ import ( infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/awserrors" infrautilconditions "sigs.k8s.io/cluster-api-provider-aws/v2/util/conditions" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - "sigs.k8s.io/cluster-api/util/conditions" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + v1beta1conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" ) // ReconcileNetwork reconciles the network of the given cluster. @@ -32,66 +32,66 @@ func (s *Service) ReconcileNetwork() (err error) { // VPC. 
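
Everything from here through the DeleteNetwork hunks is the same mechanical rename, so a reading key may help. The aliases below mirror the import hunks in this diff, and the reference shape mirrors the cleanup_test.go hunk earlier in this diff:

    import (
        // Cluster API core types move to the v1beta2 package...
        clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2"
        // ...while condition reasons and severities still come from v1beta1...
        clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
        // ...and the old util/conditions helpers move to a deprecated shim.
        v1beta1conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions"
    )

    // v1beta2 contract references drop Namespace and APIVersion; callers now
    // resolve the namespace from the owning scope instead (see the
    // s.scope.Namespace() changes in the iamauth hunks above).
    var ref = clusterv1.ContractVersionedObjectReference{
        Kind:     "AWSCluster",
        APIGroup: "infrastructure.cluster.x-k8s.io",
        Name:     "cluster1",
    }
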
if err := s.reconcileVPC(); err != nil { - conditions.MarkFalse(s.scope.InfraCluster(), infrav1.VpcReadyCondition, infrav1.VpcReconciliationFailedReason, infrautilconditions.ErrorConditionAfterInit(s.scope.ClusterObj()), "%s", err.Error()) + v1beta1conditions.MarkFalse(s.scope.InfraCluster(), infrav1.VpcReadyCondition, infrav1.VpcReconciliationFailedReason, infrautilconditions.ErrorConditionAfterInit(s.scope.ClusterObj()), "%s", err.Error()) return err } - conditions.MarkTrue(s.scope.InfraCluster(), infrav1.VpcReadyCondition) + v1beta1conditions.MarkTrue(s.scope.InfraCluster(), infrav1.VpcReadyCondition) // Secondary CIDRs if err := s.associateSecondaryCidrs(); err != nil { - conditions.MarkFalse(s.scope.InfraCluster(), infrav1.SecondaryCidrsReadyCondition, infrav1.SecondaryCidrReconciliationFailedReason, infrautilconditions.ErrorConditionAfterInit(s.scope.ClusterObj()), "%s", err.Error()) + v1beta1conditions.MarkFalse(s.scope.InfraCluster(), infrav1.SecondaryCidrsReadyCondition, infrav1.SecondaryCidrReconciliationFailedReason, infrautilconditions.ErrorConditionAfterInit(s.scope.ClusterObj()), "%s", err.Error()) return err } - conditions.MarkTrue(s.scope.InfraCluster(), infrav1.SecondaryCidrsReadyCondition) + v1beta1conditions.MarkTrue(s.scope.InfraCluster(), infrav1.SecondaryCidrsReadyCondition) // Subnets. if err := s.reconcileSubnets(); err != nil { - conditions.MarkFalse(s.scope.InfraCluster(), infrav1.SubnetsReadyCondition, infrav1.SubnetsReconciliationFailedReason, infrautilconditions.ErrorConditionAfterInit(s.scope.ClusterObj()), "%s", err.Error()) + v1beta1conditions.MarkFalse(s.scope.InfraCluster(), infrav1.SubnetsReadyCondition, infrav1.SubnetsReconciliationFailedReason, infrautilconditions.ErrorConditionAfterInit(s.scope.ClusterObj()), "%s", err.Error()) return err } - conditions.MarkTrue(s.scope.InfraCluster(), infrav1.SubnetsReadyCondition) + v1beta1conditions.MarkTrue(s.scope.InfraCluster(), infrav1.SubnetsReadyCondition) // Internet Gateways. if err := s.reconcileInternetGateways(); err != nil { - conditions.MarkFalse(s.scope.InfraCluster(), infrav1.InternetGatewayReadyCondition, infrav1.InternetGatewayFailedReason, infrautilconditions.ErrorConditionAfterInit(s.scope.ClusterObj()), "%s", err.Error()) + v1beta1conditions.MarkFalse(s.scope.InfraCluster(), infrav1.InternetGatewayReadyCondition, infrav1.InternetGatewayFailedReason, infrautilconditions.ErrorConditionAfterInit(s.scope.ClusterObj()), "%s", err.Error()) return err } - conditions.MarkTrue(s.scope.InfraCluster(), infrav1.InternetGatewayReadyCondition) + v1beta1conditions.MarkTrue(s.scope.InfraCluster(), infrav1.InternetGatewayReadyCondition) // Carrier Gateway. if err := s.reconcileCarrierGateway(); err != nil { - conditions.MarkFalse(s.scope.InfraCluster(), infrav1.CarrierGatewayReadyCondition, infrav1.CarrierGatewayFailedReason, infrautilconditions.ErrorConditionAfterInit(s.scope.ClusterObj()), "%s", err.Error()) + v1beta1conditions.MarkFalse(s.scope.InfraCluster(), infrav1.CarrierGatewayReadyCondition, infrav1.CarrierGatewayFailedReason, infrautilconditions.ErrorConditionAfterInit(s.scope.ClusterObj()), "%s", err.Error()) return err } - conditions.MarkTrue(s.scope.InfraCluster(), infrav1.CarrierGatewayReadyCondition) + v1beta1conditions.MarkTrue(s.scope.InfraCluster(), infrav1.CarrierGatewayReadyCondition) // Egress Only Internet Gateways. 
if err := s.reconcileEgressOnlyInternetGateways(); err != nil { - conditions.MarkFalse(s.scope.InfraCluster(), infrav1.EgressOnlyInternetGatewayReadyCondition, infrav1.EgressOnlyInternetGatewayFailedReason, infrautilconditions.ErrorConditionAfterInit(s.scope.ClusterObj()), "%s", err.Error()) + v1beta1conditions.MarkFalse(s.scope.InfraCluster(), infrav1.EgressOnlyInternetGatewayReadyCondition, infrav1.EgressOnlyInternetGatewayFailedReason, infrautilconditions.ErrorConditionAfterInit(s.scope.ClusterObj()), "%s", err.Error()) return err } - conditions.MarkTrue(s.scope.InfraCluster(), infrav1.EgressOnlyInternetGatewayReadyCondition) + v1beta1conditions.MarkTrue(s.scope.InfraCluster(), infrav1.EgressOnlyInternetGatewayReadyCondition) // NAT Gateways. if err := s.reconcileNatGateways(); err != nil { - conditions.MarkFalse(s.scope.InfraCluster(), infrav1.NatGatewaysReadyCondition, infrav1.NatGatewaysReconciliationFailedReason, infrautilconditions.ErrorConditionAfterInit(s.scope.ClusterObj()), "%s", err.Error()) + v1beta1conditions.MarkFalse(s.scope.InfraCluster(), infrav1.NatGatewaysReadyCondition, infrav1.NatGatewaysReconciliationFailedReason, infrautilconditions.ErrorConditionAfterInit(s.scope.ClusterObj()), "%s", err.Error()) return err } - conditions.MarkTrue(s.scope.InfraCluster(), infrav1.NatGatewaysReadyCondition) + v1beta1conditions.MarkTrue(s.scope.InfraCluster(), infrav1.NatGatewaysReadyCondition) // Routing tables. if err := s.reconcileRouteTables(); err != nil { - conditions.MarkFalse(s.scope.InfraCluster(), infrav1.RouteTablesReadyCondition, infrav1.RouteTableReconciliationFailedReason, infrautilconditions.ErrorConditionAfterInit(s.scope.ClusterObj()), "%s", err.Error()) + v1beta1conditions.MarkFalse(s.scope.InfraCluster(), infrav1.RouteTablesReadyCondition, infrav1.RouteTableReconciliationFailedReason, infrautilconditions.ErrorConditionAfterInit(s.scope.ClusterObj()), "%s", err.Error()) return err } - conditions.MarkTrue(s.scope.InfraCluster(), infrav1.RouteTablesReadyCondition) + v1beta1conditions.MarkTrue(s.scope.InfraCluster(), infrav1.RouteTablesReadyCondition) // VPC Endpoints. if err := s.reconcileVPCEndpoints(); err != nil { - conditions.MarkFalse(s.scope.InfraCluster(), infrav1.VpcEndpointsReadyCondition, infrav1.VpcEndpointsReconciliationFailedReason, infrautilconditions.ErrorConditionAfterInit(s.scope.ClusterObj()), "%s", err.Error()) + v1beta1conditions.MarkFalse(s.scope.InfraCluster(), infrav1.VpcEndpointsReadyCondition, infrav1.VpcEndpointsReconciliationFailedReason, infrautilconditions.ErrorConditionAfterInit(s.scope.ClusterObj()), "%s", err.Error()) return err } - conditions.MarkTrue(s.scope.InfraCluster(), infrav1.VpcEndpointsReadyCondition) + v1beta1conditions.MarkTrue(s.scope.InfraCluster(), infrav1.VpcEndpointsReadyCondition) s.scope.Debug("Reconcile network completed successfully") return nil @@ -120,40 +120,40 @@ func (s *Service) DeleteNetwork() (err error) { vpc.DeepCopyInto(s.scope.VPC()) // VPC Endpoints. 
- conditions.MarkFalse(s.scope.InfraCluster(), infrav1.VpcEndpointsReadyCondition, clusterv1.DeletingReason, clusterv1.ConditionSeverityInfo, "") + v1beta1conditions.MarkFalse(s.scope.InfraCluster(), infrav1.VpcEndpointsReadyCondition, clusterv1beta1.DeletingReason, clusterv1beta1.ConditionSeverityInfo, "") if err := s.scope.PatchObject(); err != nil { return err } if err := s.deleteVPCEndpoints(); err != nil { - conditions.MarkFalse(s.scope.InfraCluster(), infrav1.VpcEndpointsReadyCondition, "DeletingFailed", clusterv1.ConditionSeverityWarning, "%s", err.Error()) + v1beta1conditions.MarkFalse(s.scope.InfraCluster(), infrav1.VpcEndpointsReadyCondition, "DeletingFailed", clusterv1beta1.ConditionSeverityWarning, "%s", err.Error()) return err } - conditions.MarkFalse(s.scope.InfraCluster(), infrav1.VpcEndpointsReadyCondition, clusterv1.DeletedReason, clusterv1.ConditionSeverityInfo, "") + v1beta1conditions.MarkFalse(s.scope.InfraCluster(), infrav1.VpcEndpointsReadyCondition, clusterv1beta1.DeletedReason, clusterv1beta1.ConditionSeverityInfo, "") // Routing tables. - conditions.MarkFalse(s.scope.InfraCluster(), infrav1.RouteTablesReadyCondition, clusterv1.DeletingReason, clusterv1.ConditionSeverityInfo, "") + v1beta1conditions.MarkFalse(s.scope.InfraCluster(), infrav1.RouteTablesReadyCondition, clusterv1beta1.DeletingReason, clusterv1beta1.ConditionSeverityInfo, "") if err := s.scope.PatchObject(); err != nil { return err } if err := s.deleteRouteTables(); err != nil { - conditions.MarkFalse(s.scope.InfraCluster(), infrav1.RouteTablesReadyCondition, "DeletingFailed", clusterv1.ConditionSeverityWarning, "%s", err.Error()) + v1beta1conditions.MarkFalse(s.scope.InfraCluster(), infrav1.RouteTablesReadyCondition, "DeletingFailed", clusterv1beta1.ConditionSeverityWarning, "%s", err.Error()) return err } - conditions.MarkFalse(s.scope.InfraCluster(), infrav1.RouteTablesReadyCondition, clusterv1.DeletedReason, clusterv1.ConditionSeverityInfo, "") + v1beta1conditions.MarkFalse(s.scope.InfraCluster(), infrav1.RouteTablesReadyCondition, clusterv1beta1.DeletedReason, clusterv1beta1.ConditionSeverityInfo, "") // NAT Gateways. - conditions.MarkFalse(s.scope.InfraCluster(), infrav1.NatGatewaysReadyCondition, clusterv1.DeletingReason, clusterv1.ConditionSeverityInfo, "") + v1beta1conditions.MarkFalse(s.scope.InfraCluster(), infrav1.NatGatewaysReadyCondition, clusterv1beta1.DeletingReason, clusterv1beta1.ConditionSeverityInfo, "") if err := s.scope.PatchObject(); err != nil { return err } if err := s.deleteNatGateways(); err != nil { - conditions.MarkFalse(s.scope.InfraCluster(), infrav1.NatGatewaysReadyCondition, "DeletingFailed", clusterv1.ConditionSeverityWarning, "%s", err.Error()) + v1beta1conditions.MarkFalse(s.scope.InfraCluster(), infrav1.NatGatewaysReadyCondition, "DeletingFailed", clusterv1beta1.ConditionSeverityWarning, "%s", err.Error()) return err } - conditions.MarkFalse(s.scope.InfraCluster(), infrav1.NatGatewaysReadyCondition, clusterv1.DeletedReason, clusterv1.ConditionSeverityInfo, "") + v1beta1conditions.MarkFalse(s.scope.InfraCluster(), infrav1.NatGatewaysReadyCondition, clusterv1beta1.DeletedReason, clusterv1beta1.ConditionSeverityInfo, "") // EIPs. if err := s.releaseAddresses(); err != nil { @@ -161,68 +161,68 @@ func (s *Service) DeleteNetwork() (err error) { } // Internet Gateways. 
- conditions.MarkFalse(s.scope.InfraCluster(), infrav1.InternetGatewayReadyCondition, clusterv1.DeletingReason, clusterv1.ConditionSeverityInfo, "") + v1beta1conditions.MarkFalse(s.scope.InfraCluster(), infrav1.InternetGatewayReadyCondition, clusterv1beta1.DeletingReason, clusterv1beta1.ConditionSeverityInfo, "") if err := s.scope.PatchObject(); err != nil { return err } if err := s.deleteInternetGateways(); err != nil { - conditions.MarkFalse(s.scope.InfraCluster(), infrav1.InternetGatewayReadyCondition, "DeletingFailed", clusterv1.ConditionSeverityWarning, "%s", err.Error()) + v1beta1conditions.MarkFalse(s.scope.InfraCluster(), infrav1.InternetGatewayReadyCondition, "DeletingFailed", clusterv1beta1.ConditionSeverityWarning, "%s", err.Error()) return err } - conditions.MarkFalse(s.scope.InfraCluster(), infrav1.InternetGatewayReadyCondition, clusterv1.DeletedReason, clusterv1.ConditionSeverityInfo, "") + v1beta1conditions.MarkFalse(s.scope.InfraCluster(), infrav1.InternetGatewayReadyCondition, clusterv1beta1.DeletedReason, clusterv1beta1.ConditionSeverityInfo, "") // Carrier Gateway. if s.scope.VPC().CarrierGatewayID != nil { if err := s.deleteCarrierGateway(); err != nil { - conditions.MarkFalse(s.scope.InfraCluster(), infrav1.CarrierGatewayReadyCondition, "DeletingFailed", clusterv1.ConditionSeverityWarning, "%s", err.Error()) + v1beta1conditions.MarkFalse(s.scope.InfraCluster(), infrav1.CarrierGatewayReadyCondition, "DeletingFailed", clusterv1beta1.ConditionSeverityWarning, "%s", err.Error()) return err } - conditions.MarkFalse(s.scope.InfraCluster(), infrav1.CarrierGatewayReadyCondition, clusterv1.DeletedReason, clusterv1.ConditionSeverityInfo, "") + v1beta1conditions.MarkFalse(s.scope.InfraCluster(), infrav1.CarrierGatewayReadyCondition, clusterv1beta1.DeletedReason, clusterv1beta1.ConditionSeverityInfo, "") } // Egress Only Internet Gateways. - conditions.MarkFalse(s.scope.InfraCluster(), infrav1.EgressOnlyInternetGatewayReadyCondition, clusterv1.DeletingReason, clusterv1.ConditionSeverityInfo, "") + v1beta1conditions.MarkFalse(s.scope.InfraCluster(), infrav1.EgressOnlyInternetGatewayReadyCondition, clusterv1beta1.DeletingReason, clusterv1beta1.ConditionSeverityInfo, "") if err := s.scope.PatchObject(); err != nil { return err } if err := s.deleteEgressOnlyInternetGateways(); err != nil { - conditions.MarkFalse(s.scope.InfraCluster(), infrav1.EgressOnlyInternetGatewayReadyCondition, "DeletingFailed", clusterv1.ConditionSeverityWarning, "%s", err.Error()) + v1beta1conditions.MarkFalse(s.scope.InfraCluster(), infrav1.EgressOnlyInternetGatewayReadyCondition, "DeletingFailed", clusterv1beta1.ConditionSeverityWarning, "%s", err.Error()) return err } - conditions.MarkFalse(s.scope.InfraCluster(), infrav1.EgressOnlyInternetGatewayReadyCondition, clusterv1.DeletedReason, clusterv1.ConditionSeverityInfo, "") + v1beta1conditions.MarkFalse(s.scope.InfraCluster(), infrav1.EgressOnlyInternetGatewayReadyCondition, clusterv1beta1.DeletedReason, clusterv1beta1.ConditionSeverityInfo, "") // Subnets. 
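
The Subnets hunk below repeats the sequence used for each resource in DeleteNetwork: mark the condition Deleting, patch the object, run the delete, then mark Deleted (or DeletingFailed on error). A hypothetical helper, not part of this patch, makes the shape explicit; it assumes the infrav1 condition constants are the deprecated v1beta1 ConditionType, consistent with the calls below:

    // markAndDelete is illustrative only: mark the condition Deleting, persist
    // it, run the delete, then record Deleted or DeletingFailed.
    func markAndDelete(s *Service, cond clusterv1beta1.ConditionType, deleteFn func() error) error {
        v1beta1conditions.MarkFalse(s.scope.InfraCluster(), cond, clusterv1beta1.DeletingReason, clusterv1beta1.ConditionSeverityInfo, "")
        if err := s.scope.PatchObject(); err != nil {
            return err
        }
        if err := deleteFn(); err != nil {
            v1beta1conditions.MarkFalse(s.scope.InfraCluster(), cond, "DeletingFailed", clusterv1beta1.ConditionSeverityWarning, "%s", err.Error())
            return err
        }
        v1beta1conditions.MarkFalse(s.scope.InfraCluster(), cond, clusterv1beta1.DeletedReason, clusterv1beta1.ConditionSeverityInfo, "")
        return nil
    }

    // Usage sketch: markAndDelete(s, infrav1.SubnetsReadyCondition, s.deleteSubnets)
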
- conditions.MarkFalse(s.scope.InfraCluster(), infrav1.SubnetsReadyCondition, clusterv1.DeletingReason, clusterv1.ConditionSeverityInfo, "") + v1beta1conditions.MarkFalse(s.scope.InfraCluster(), infrav1.SubnetsReadyCondition, clusterv1beta1.DeletingReason, clusterv1beta1.ConditionSeverityInfo, "") if err := s.scope.PatchObject(); err != nil { return err } if err := s.deleteSubnets(); err != nil { - conditions.MarkFalse(s.scope.InfraCluster(), infrav1.SubnetsReadyCondition, "DeletingFailed", clusterv1.ConditionSeverityWarning, "%s", err.Error()) + v1beta1conditions.MarkFalse(s.scope.InfraCluster(), infrav1.SubnetsReadyCondition, "DeletingFailed", clusterv1beta1.ConditionSeverityWarning, "%s", err.Error()) return err } - conditions.MarkFalse(s.scope.InfraCluster(), infrav1.SubnetsReadyCondition, clusterv1.DeletedReason, clusterv1.ConditionSeverityInfo, "") + v1beta1conditions.MarkFalse(s.scope.InfraCluster(), infrav1.SubnetsReadyCondition, clusterv1beta1.DeletedReason, clusterv1beta1.ConditionSeverityInfo, "") // Secondary CIDR. - conditions.MarkFalse(s.scope.InfraCluster(), infrav1.SecondaryCidrsReadyCondition, clusterv1.DeletingReason, clusterv1.ConditionSeverityInfo, "") + v1beta1conditions.MarkFalse(s.scope.InfraCluster(), infrav1.SecondaryCidrsReadyCondition, clusterv1beta1.DeletingReason, clusterv1beta1.ConditionSeverityInfo, "") if err := s.disassociateSecondaryCidrs(); err != nil { - conditions.MarkFalse(s.scope.InfraCluster(), infrav1.SecondaryCidrsReadyCondition, "DisassociateFailed", clusterv1.ConditionSeverityWarning, "%s", err.Error()) + v1beta1conditions.MarkFalse(s.scope.InfraCluster(), infrav1.SecondaryCidrsReadyCondition, "DisassociateFailed", clusterv1beta1.ConditionSeverityWarning, "%s", err.Error()) return err } // VPC. - conditions.MarkFalse(s.scope.InfraCluster(), infrav1.VpcReadyCondition, clusterv1.DeletingReason, clusterv1.ConditionSeverityInfo, "") + v1beta1conditions.MarkFalse(s.scope.InfraCluster(), infrav1.VpcReadyCondition, clusterv1beta1.DeletingReason, clusterv1beta1.ConditionSeverityInfo, "") if err := s.scope.PatchObject(); err != nil { return err } if err := s.deleteVPC(); err != nil { - conditions.MarkFalse(s.scope.InfraCluster(), infrav1.VpcReadyCondition, "DeletingFailed", clusterv1.ConditionSeverityWarning, "%s", err.Error()) + v1beta1conditions.MarkFalse(s.scope.InfraCluster(), infrav1.VpcReadyCondition, "DeletingFailed", clusterv1beta1.ConditionSeverityWarning, "%s", err.Error()) return err } - conditions.MarkFalse(s.scope.InfraCluster(), infrav1.VpcReadyCondition, clusterv1.DeletedReason, clusterv1.ConditionSeverityInfo, "") + v1beta1conditions.MarkFalse(s.scope.InfraCluster(), infrav1.VpcReadyCondition, clusterv1beta1.DeletedReason, clusterv1beta1.ConditionSeverityInfo, "") s.scope.Debug("Delete network completed successfully") return nil diff --git a/pkg/cloud/services/network/routetables.go b/pkg/cloud/services/network/routetables.go index 21dd039ff1..7f9a921376 100644 --- a/pkg/cloud/services/network/routetables.go +++ b/pkg/cloud/services/network/routetables.go @@ -33,7 +33,7 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/wait" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/tags" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/record" - "sigs.k8s.io/cluster-api/util/conditions" + v1beta1conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" ) const ( @@ -122,7 +122,7 @@ func (s *Service) reconcileRouteTables() error { s.scope.Debug("Subnet has been associated with route table", "subnet-id", 
sn.GetResourceID(), "route-table-id", rt.ID) sn.RouteTableID = aws.String(rt.ID) } - conditions.MarkTrue(s.scope.InfraCluster(), infrav1.RouteTablesReadyCondition) + v1beta1conditions.MarkTrue(s.scope.InfraCluster(), infrav1.RouteTablesReadyCondition) return nil } @@ -265,7 +265,8 @@ func (s *Service) createRouteTableWithRoutes(routes []*ec2.CreateRouteInput, isP out, err := s.EC2Client.CreateRouteTable(context.TODO(), &ec2.CreateRouteTableInput{ VpcId: aws.String(s.scope.VPC().ID), TagSpecifications: []types.TagSpecification{ - tags.BuildParamsToTagSpecification(types.ResourceTypeRouteTable, s.getRouteTableTagParams(services.TemporaryResourceID, isPublic, zone))}, + tags.BuildParamsToTagSpecification(types.ResourceTypeRouteTable, s.getRouteTableTagParams(services.TemporaryResourceID, isPublic, zone)), + }, }) if err != nil { record.Warnf(s.scope.InfraCluster(), "FailedCreateRouteTable", "Failed to create managed RouteTable: %v", err) diff --git a/pkg/cloud/services/network/routetables_test.go b/pkg/cloud/services/network/routetables_test.go index eb131b8217..526ec1d537 100644 --- a/pkg/cloud/services/network/routetables_test.go +++ b/pkg/cloud/services/network/routetables_test.go @@ -38,7 +38,7 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/awserrors" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope" "sigs.k8s.io/cluster-api-provider-aws/v2/test/mocks" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" ) func TestReconcileRouteTables(t *testing.T) { diff --git a/pkg/cloud/services/network/secondarycidr_test.go b/pkg/cloud/services/network/secondarycidr_test.go index 3296072299..d6e8368a27 100644 --- a/pkg/cloud/services/network/secondarycidr_test.go +++ b/pkg/cloud/services/network/secondarycidr_test.go @@ -35,13 +35,13 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/awserrors" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope" "sigs.k8s.io/cluster-api-provider-aws/v2/test/mocks" - "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" ) func setupNewManagedControlPlaneScope(cl client.Client) (*scope.ManagedControlPlaneScope, error) { return scope.NewManagedControlPlaneScope(scope.ManagedControlPlaneScopeParams{ Client: cl, - Cluster: &v1beta1.Cluster{}, + Cluster: &clusterv1.Cluster{}, ControlPlane: &ekscontrolplanev1.AWSManagedControlPlane{ Spec: ekscontrolplanev1.AWSManagedControlPlaneSpec{ SecondaryCidrBlock: ptr.To[string]("secondary-cidr"), @@ -101,7 +101,8 @@ func TestServiceAssociateSecondaryCidr(t *testing.T) { {CidrBlock: aws.String("secondary-cidr")}, }, }, - }}, nil) + }, + }, nil) }, }, { @@ -123,7 +124,8 @@ func TestServiceAssociateSecondaryCidr(t *testing.T) { { CidrBlockAssociationSet: []types.VpcCidrBlockAssociation{}, }, - }}, nil) + }, + }, nil) m.AssociateVpcCidrBlock(context.TODO(), gomock.AssignableToTypeOf(&ec2.AssociateVpcCidrBlockInput{})).Return(nil, awserrors.NewFailedDependency("dependency-failure")) }, wantErr: true, @@ -137,7 +139,8 @@ func TestServiceAssociateSecondaryCidr(t *testing.T) { { CidrBlockAssociationSet: []types.VpcCidrBlockAssociation{}, }, - }}, nil) + }, + }, nil) m.AssociateVpcCidrBlock(context.TODO(), gomock.AssignableToTypeOf(&ec2.AssociateVpcCidrBlockInput{})).Return(&ec2.AssociateVpcCidrBlockOutput{ CidrBlockAssociation: &types.VpcCidrBlockAssociation{ AssociationId: ptr.To[string]("association-id-success"), @@ -179,7 +182,8 @@ func TestServiceAssociateSecondaryCidr(t *testing.T) { }, }, }, - }}, nil) + 
}, + }, nil) // ...the other two should be created m.AssociateVpcCidrBlock(context.TODO(), gomock.Eq(&ec2.AssociateVpcCidrBlockInput{ @@ -286,7 +290,8 @@ func TestServiceDiassociateSecondaryCidr(t *testing.T) { {CidrBlock: aws.String("secondary-cidr")}, }, }, - }}, nil) + }, + }, nil) m.DisassociateVpcCidrBlock(context.TODO(), gomock.AssignableToTypeOf(&ec2.DisassociateVpcCidrBlockInput{})).Return(nil, nil) }, }, @@ -301,7 +306,8 @@ func TestServiceDiassociateSecondaryCidr(t *testing.T) { {CidrBlock: aws.String("secondary-cidr")}, }, }, - }}, nil) + }, + }, nil) m.DisassociateVpcCidrBlock(context.TODO(), gomock.AssignableToTypeOf(&ec2.DisassociateVpcCidrBlockInput{})).Return(nil, awserrors.NewFailedDependency("dependency-failure")) }, wantErr: true, @@ -315,7 +321,8 @@ func TestServiceDiassociateSecondaryCidr(t *testing.T) { { CidrBlockAssociationSet: []types.VpcCidrBlockAssociation{}, }, - }}, nil) + }, + }, nil) // No calls expected m.DisassociateVpcCidrBlock(context.TODO(), gomock.Any()).Times(0) @@ -366,7 +373,8 @@ func TestServiceDiassociateSecondaryCidr(t *testing.T) { }, }, }, - }}, nil) + }, + }, nil) m.DisassociateVpcCidrBlock(context.TODO(), gomock.Eq(&ec2.DisassociateVpcCidrBlockInput{ AssociationId: ptr.To[string]("association-id-existing-1"), // 10.0.1.0/24 (see above) diff --git a/pkg/cloud/services/network/subnets.go b/pkg/cloud/services/network/subnets.go index f339a9a8c0..991e268fd2 100644 --- a/pkg/cloud/services/network/subnets.go +++ b/pkg/cloud/services/network/subnets.go @@ -39,7 +39,7 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/tags" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/internal/cidr" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/record" - "sigs.k8s.io/cluster-api/util/conditions" + v1beta1conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" ) const ( @@ -205,7 +205,7 @@ func (s *Service) reconcileSubnets() error { } s.scope.Debug("Reconciled subnets", "subnets", subnets) - conditions.MarkTrue(s.scope.InfraCluster(), infrav1.SubnetsReadyCondition) + v1beta1conditions.MarkTrue(s.scope.InfraCluster(), infrav1.SubnetsReadyCondition) return nil } diff --git a/pkg/cloud/services/network/subnets_test.go b/pkg/cloud/services/network/subnets_test.go index f14c9b7deb..af10b34712 100644 --- a/pkg/cloud/services/network/subnets_test.go +++ b/pkg/cloud/services/network/subnets_test.go @@ -40,7 +40,7 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/awserrors" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope" "sigs.k8s.io/cluster-api-provider-aws/v2/test/mocks" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" ) const ( diff --git a/pkg/cloud/services/network/vpc.go b/pkg/cloud/services/network/vpc.go index 078afd1dd7..4e7bee5b02 100644 --- a/pkg/cloud/services/network/vpc.go +++ b/pkg/cloud/services/network/vpc.go @@ -36,8 +36,8 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/tags" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/record" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/utils" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - "sigs.k8s.io/cluster-api/util/conditions" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + v1beta1conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" ) const ( @@ -137,8 +137,8 @@ func (s *Service) reconcileVPC() error { s.scope.VPC().Tags = vpc.Tags s.scope.VPC().ID = vpc.ID - if !conditions.Has(s.scope.InfraCluster(), infrav1.VpcReadyCondition) { - 
conditions.MarkFalse(s.scope.InfraCluster(), infrav1.VpcReadyCondition, infrav1.VpcCreationStartedReason, clusterv1.ConditionSeverityInfo, "") + if !v1beta1conditions.Has(s.scope.InfraCluster(), infrav1.VpcReadyCondition) { + v1beta1conditions.MarkFalse(s.scope.InfraCluster(), infrav1.VpcReadyCondition, infrav1.VpcCreationStartedReason, clusterv1beta1.ConditionSeverityInfo, "") if err := s.scope.PatchObject(); err != nil { return errors.Wrap(err, "failed to patch conditions") } diff --git a/pkg/cloud/services/network/vpc_test.go b/pkg/cloud/services/network/vpc_test.go index 9c2f5f3a22..9b9f3a1e4d 100644 --- a/pkg/cloud/services/network/vpc_test.go +++ b/pkg/cloud/services/network/vpc_test.go @@ -34,7 +34,7 @@ import ( infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope" "sigs.k8s.io/cluster-api-provider-aws/v2/test/mocks" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" ) func describeVpcAttributeTrue(_ context.Context, input *ec2.DescribeVpcAttributeInput, _ ...ec2.Options) (*ec2.DescribeVpcAttributeOutput, error) { diff --git a/pkg/cloud/services/s3/s3_test.go b/pkg/cloud/services/s3/s3_test.go index 378d3114d3..0f4dbb2e78 100644 --- a/pkg/cloud/services/s3/s3_test.go +++ b/pkg/cloud/services/s3/s3_test.go @@ -43,7 +43,7 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/s3" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/s3/mock_s3iface" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/sts/mock_stsiface" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" ) const ( diff --git a/pkg/cloud/services/secretsmanager/secret_test.go b/pkg/cloud/services/secretsmanager/secret_test.go index 2f9b83dc40..8c07fcdfe0 100644 --- a/pkg/cloud/services/secretsmanager/secret_test.go +++ b/pkg/cloud/services/secretsmanager/secret_test.go @@ -35,7 +35,7 @@ import ( infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope" "sigs.k8s.io/cluster-api-provider-aws/v2/test/mocks" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" ) func TestServiceCreate(t *testing.T) { diff --git a/pkg/cloud/services/securitygroup/securitygroups.go b/pkg/cloud/services/securitygroup/securitygroups.go index 9de501f7c5..f2beee18b8 100644 --- a/pkg/cloud/services/securitygroup/securitygroups.go +++ b/pkg/cloud/services/securitygroup/securitygroups.go @@ -38,8 +38,8 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/tags" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/record" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/utils" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - "sigs.k8s.io/cluster-api/util/conditions" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + v1beta1conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" ) const ( @@ -197,7 +197,7 @@ func (s *Service) ReconcileSecurityGroups() error { s.scope.Debug("Authorized ingress rules in security group", "authorized-ingress-rules", toAuthorize, "security-group-id", sg.ID) } } - conditions.MarkTrue(s.scope.InfraCluster(), infrav1.ClusterSecurityGroupsReadyCondition) + v1beta1conditions.MarkTrue(s.scope.InfraCluster(), infrav1.ClusterSecurityGroupsReadyCondition) return nil } @@ -308,7 +308,7 @@ func (s *Service) ec2SecurityGroupToSecurityGroup(ec2SecurityGroup types.Securit func (s *Service) 
DeleteSecurityGroups() error { if s.scope.VPC().ID == "" { s.scope.Debug("Skipping security group deletion, vpc-id is nil", "vpc-id", s.scope.VPC().ID) - conditions.MarkFalse(s.scope.InfraCluster(), infrav1.ClusterSecurityGroupsReadyCondition, clusterv1.DeletedReason, clusterv1.ConditionSeverityInfo, "") + v1beta1conditions.MarkFalse(s.scope.InfraCluster(), infrav1.ClusterSecurityGroupsReadyCondition, clusterv1beta1.DeletedReason, clusterv1beta1.ConditionSeverityInfo, "") return nil } @@ -322,7 +322,7 @@ func (s *Service) DeleteSecurityGroups() error { return nil } - conditions.MarkFalse(s.scope.InfraCluster(), infrav1.ClusterSecurityGroupsReadyCondition, clusterv1.DeletingReason, clusterv1.ConditionSeverityInfo, "") + v1beta1conditions.MarkFalse(s.scope.InfraCluster(), infrav1.ClusterSecurityGroupsReadyCondition, clusterv1beta1.DeletingReason, clusterv1beta1.ConditionSeverityInfo, "") if err := s.scope.PatchObject(); err != nil { return err } @@ -331,7 +331,7 @@ func (s *Service) DeleteSecurityGroups() error { sg := clusterGroups[i] current := sg.IngressRules if err := s.revokeAllSecurityGroupIngressRules(sg.ID); awserrors.IsIgnorableSecurityGroupError(err) != nil { //nolint:gocritic - conditions.MarkFalse(s.scope.InfraCluster(), infrav1.ClusterSecurityGroupsReadyCondition, "DeletingFailed", clusterv1.ConditionSeverityWarning, "%s", err.Error()) + v1beta1conditions.MarkFalse(s.scope.InfraCluster(), infrav1.ClusterSecurityGroupsReadyCondition, "DeletingFailed", clusterv1beta1.ConditionSeverityWarning, "%s", err.Error()) return err } @@ -343,10 +343,10 @@ func (s *Service) DeleteSecurityGroups() error { } if err != nil { - conditions.MarkFalse(s.scope.InfraCluster(), infrav1.ClusterSecurityGroupsReadyCondition, "DeletingFailed", clusterv1.ConditionSeverityWarning, "%s", err.Error()) + v1beta1conditions.MarkFalse(s.scope.InfraCluster(), infrav1.ClusterSecurityGroupsReadyCondition, "DeletingFailed", clusterv1beta1.ConditionSeverityWarning, "%s", err.Error()) return err } - conditions.MarkFalse(s.scope.InfraCluster(), infrav1.ClusterSecurityGroupsReadyCondition, clusterv1.DeletedReason, clusterv1.ConditionSeverityInfo, "") + v1beta1conditions.MarkFalse(s.scope.InfraCluster(), infrav1.ClusterSecurityGroupsReadyCondition, clusterv1beta1.DeletedReason, clusterv1beta1.ConditionSeverityInfo, "") return nil } diff --git a/pkg/cloud/services/securitygroup/securitygroups_test.go b/pkg/cloud/services/securitygroup/securitygroups_test.go index 2fd1cc64db..4a2a7b33b1 100644 --- a/pkg/cloud/services/securitygroup/securitygroups_test.go +++ b/pkg/cloud/services/securitygroup/securitygroups_test.go @@ -41,7 +41,7 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services" "sigs.k8s.io/cluster-api-provider-aws/v2/test/mocks" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" ) var ( diff --git a/pkg/cloud/services/ssm/secret_test.go b/pkg/cloud/services/ssm/secret_test.go index d140cc9ecb..0f15dc9f48 100644 --- a/pkg/cloud/services/ssm/secret_test.go +++ b/pkg/cloud/services/ssm/secret_test.go @@ -38,7 +38,7 @@ import ( infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/ssm/mock_ssmiface" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" ) type mockAPIError struct { diff --git a/pkg/rosa/client.go 
b/pkg/rosa/client.go index 90670772c3..43d3d33565 100644 --- a/pkg/rosa/client.go +++ b/pkg/rosa/client.go @@ -1,3 +1,19 @@ +/* +Copyright 2025 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + // Package rosa provides a way to interact with the Red Hat OpenShift Service on AWS (ROSA) API. package rosa @@ -28,8 +44,15 @@ const ( capaAgentName = "CAPA" ) +// OCMSecretsRetriever contains the functions needed to create an OCM connection. +type OCMSecretsRetriever interface { + CredentialsSecret() *corev1.Secret + GetClient() client.Client + Info(msg string, keysAndValues ...interface{}) +} + // NewOCMClient creates a new OCM client. -func NewOCMClient(ctx context.Context, rosaScope *scope.ROSAControlPlaneScope) (*ocm.Client, error) { +func NewOCMClient(ctx context.Context, rosaScope OCMSecretsRetriever) (*ocm.Client, error) { token, url, clientID, clientSecret, err := ocmCredentials(ctx, rosaScope) if err != nil { return nil, err @@ -62,6 +85,25 @@ func NewWrappedOCMClient(ctx context.Context, rosaScope *scope.ROSAControlPlaneS return &c, err } +// NewWrappedOCMClientWithoutControlPlane creates a wrapped OCM client without requiring a ROSA control plane scope. +func NewWrappedOCMClientWithoutControlPlane(ctx context.Context, rosaScope OCMSecretsRetriever) (OCMClient, error) { + ocmClient, err := NewOCMClient(ctx, rosaScope) + c := ocmclient{ + ocmClient: ocmClient, + } + + return &c, err +} + +// NewWrappedOCMClientFromOCMClient wraps an existing *ocm.Client in the OCMClient interface. +func NewWrappedOCMClientFromOCMClient(ctx context.Context, ocmClient *ocm.Client) (OCMClient, error) { + c := ocmclient{ + ocmClient: ocmClient, + } + + return &c, nil +} + func newOCMRawConnection(ctx context.Context, rosaScope *scope.ROSAControlPlaneScope) (*sdk.Connection, error) { ocmSdkLogger, err := sdk.NewGoLoggerBuilder(). Debug(false).
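Since this change replaces the concrete `*scope.ROSAControlPlaneScope` parameter with the `OCMSecretsRetriever` interface, any type that exposes a credentials secret, a Kubernetes client, and a logger can drive credential resolution. A minimal sketch of a test stub, assuming the interface shape above; the type, field names, and the `ocmToken` secret key are illustrative assumptions, not part of this PR:

```go
package rosa_test

import (
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/client/fake"
)

// stubRetriever satisfies OCMSecretsRetriever with canned data so the
// credential-resolution path can be exercised without a control plane scope.
type stubRetriever struct {
	c      client.Client
	secret *corev1.Secret
}

func (s *stubRetriever) CredentialsSecret() *corev1.Secret             { return s.secret }
func (s *stubRetriever) GetClient() client.Client                      { return s.c }
func (s *stubRetriever) Info(msg string, keysAndValues ...interface{}) {}

func newStubRetriever() *stubRetriever {
	// Hypothetical credentials secret; the "ocmToken" key is for illustration only.
	secret := &corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{Name: "rosa-creds-secret", Namespace: "capa-system"},
		Data:       map[string][]byte{"ocmToken": []byte("fake-token")},
	}
	return &stubRetriever{
		c:      fake.NewClientBuilder().WithObjects(secret).Build(),
		secret: secret,
	}
}
```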
@@ -94,7 +136,7 @@ func newOCMRawConnection(ctx context.Context, rosaScope *scope.ROSAControlPlaneS return connection, nil } -func ocmCredentials(ctx context.Context, rosaScope *scope.ROSAControlPlaneScope) (string, string, string, string, error) { +func ocmCredentials(ctx context.Context, rosaScope OCMSecretsRetriever) (string, string, string, string, error) { var token string // Offline SSO token var ocmClientID string // Service account client id var ocmClientSecret string // Service account client secret @@ -102,8 +146,9 @@ func ocmCredentials(ctx context.Context, rosaScope *scope.ROSAControlPlaneScope) var secret *corev1.Secret secret = rosaScope.CredentialsSecret() // We'll retrieve the OCM credentials ref from the ROSA control plane + if secret != nil { - if err := rosaScope.Client.Get(ctx, client.ObjectKeyFromObject(secret), secret); err != nil { + if err := rosaScope.GetClient().Get(ctx, client.ObjectKeyFromObject(secret), secret); err != nil { return "", "", "", "", fmt.Errorf("failed to get credentials secret: %w", err) } } else { // If the reference to OCM secret wasn't specified in the ROSA control plane, we'll try to use a predefined secret name from the capa namespace @@ -114,7 +159,7 @@ }, } - err := rosaScope.Client.Get(ctx, client.ObjectKeyFromObject(secret), secret) + err := rosaScope.GetClient().Get(ctx, client.ObjectKeyFromObject(secret), secret) // We'll ignore non-existent secret so that we can try the ENV variable fallback below // TODO: once the ENV variable fallback is gone, we can no longer ignore non-existent secret here if err != nil && !apierrors.IsNotFound(err) { @@ -152,3 +197,21 @@ return token, ocmAPIUrl, ocmClientID, ocmClientSecret, nil } + +// GetOCMClientEnv returns the OCM environment name matching the client's connection URL; it defaults to production. +// "production": "https://api.openshift.com", +// "staging": "https://api.stage.openshift.com", +// "integration": "https://api.integration.openshift.com", +// "local": "http://localhost:8000", +// "local-proxy": "http://localhost:9000", +// "crc": "https://clusters-service.apps-crc.testing", +func GetOCMClientEnv(ocmClient *ocm.Client) string { + for k, v := range ocm.URLAliases { + if v == ocmClient.GetConnectionURL() { + return k + } + } + + // Defaults to production + return ocm.Production +}
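With a retriever like the stub sketched earlier, the new constructor path can be driven end to end. This is an illustrative sketch only, not a test from this change set; a real call would reach out to the configured OCM endpoint:

```go
package rosa_test

import (
	"context"
	"fmt"

	"sigs.k8s.io/cluster-api-provider-aws/v2/pkg/rosa"
)

// buildClientFromStub resolves credentials through the interface rather than
// a *scope.ROSAControlPlaneScope, then reports the target OCM environment.
func buildClientFromStub(ctx context.Context) error {
	ocmClient, err := rosa.NewOCMClient(ctx, newStubRetriever())
	if err != nil {
		return fmt.Errorf("failed to create OCM client: %w", err)
	}
	fmt.Println("OCM environment:", rosa.GetOCMClientEnv(ocmClient))
	return nil
}
```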
diff --git a/pkg/rosa/ocmclient.go b/pkg/rosa/ocmclient.go index 04c4fa700a..d13292cb67 100644 --- a/pkg/rosa/ocmclient.go +++ b/pkg/rosa/ocmclient.go @@ -1,8 +1,25 @@ +/* +Copyright 2025 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + // Package rosa provides a way to interact with the Red Hat OpenShift Service on AWS (ROSA) API. package rosa import ( "context" + "fmt" v1 "github.com/openshift-online/ocm-sdk-go/clustersmgmt/v1" "github.com/openshift/rosa/pkg/aws" @@ -30,11 +47,12 @@ type OCMClient interface { GetCluster(clusterKey string, creator *aws.Creator) (*v1.Cluster, error) GetControlPlaneUpgradePolicies(clusterID string) (controlPlaneUpgradePolicies []*v1.ControlPlaneUpgradePolicy, err error) GetHTPasswdUserList(clusterID string, htpasswdIDPId string) (*v1.HTPasswdUserList, error) + GetHypershiftNodePoolUpgrade(clusterID string, clusterKey string, nodePoolID string) (*v1.NodePool, *v1.NodePoolUpgradePolicy, error) GetIdentityProviders(clusterID string) ([]*v1.IdentityProvider, error) GetMissingGateAgreementsHypershift(clusterID string, upgradePolicy *v1.ControlPlaneUpgradePolicy) ([]*v1.VersionGate, error) GetNodePool(clusterID string, nodePoolID string) (*v1.NodePool, bool, error) GetNodePools(clusterID string) ([]*v1.NodePool, error) - GetHypershiftNodePoolUpgrade(clusterID string, clusterKey string, nodePoolID string) (*v1.NodePool, *v1.NodePoolUpgradePolicy, error) + GetPolicies(policyType string) (map[string]*v1.AWSSTSPolicy, error) GetUser(clusterID string, group string, username string) (*v1.User, error) ScheduleHypershiftControlPlaneUpgrade(clusterID string, upgradePolicy *v1.ControlPlaneUpgradePolicy) (*v1.ControlPlaneUpgradePolicy, error) ScheduleNodePoolUpgrade(clusterID string, nodePoolID string, upgradePolicy *v1.NodePoolUpgradePolicy) (*v1.NodePoolUpgradePolicy, error) @@ -108,6 +126,10 @@ func (c *ocmclient) GetCluster(clusterKey string, creator *aws.Creator) (*v1.Clu return c.ocmClient.GetCluster(clusterKey, creator) } +func (c *ocmclient) GetPolicies(policyType string) (map[string]*v1.AWSSTSPolicy, error) { + return c.ocmClient.GetPolicies(policyType) +} + func (c *ocmclient) GetUser(clusterID string, group string, username string) (*v1.User, error) { return c.ocmClient.GetUser(clusterID, group, username) } @@ -136,3 +158,16 @@ func (c *ocmclient) ValidateHypershiftVersion(versionRawID string, channelGroup func NewMockOCMClient(ctx context.Context, rosaScope *scope.ROSAControlPlaneScope) (OCMClient, error) { return &ocmclient{ocmClient: &ocm.Client{}}, nil } + +// ConvertToRosaOcmClient converts an OCMClient into the *ocm.Client required by the rosa CLI library. +func ConvertToRosaOcmClient(i OCMClient) (*ocm.Client, error) { + c, ok := i.(*ocmclient) + if !ok { + c, ok := i.(*ocm.Client) + if !ok { + return nil, fmt.Errorf("failed to convert to Rosa OCM Client") + } + return c, nil + } + return c.ocmClient, nil +} diff --git a/pkg/utils/utils.go b/pkg/utils/utils.go index c056042fba..886470530c 100644 --- a/pkg/utils/utils.go +++ b/pkg/utils/utils.go @@ -9,20 +9,19 @@ import ( "k8s.io/utils/ptr" crclient "sigs.k8s.io/controller-runtime/pkg/client" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - expclusterv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" )
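Parts of the rosa CLI library still expect the concrete *ocm.Client, so interface-typed callers unwrap it via the helper above. A hedged sketch of the round trip, assuming only the constructors added in this diff; the function name is hypothetical:

```go
package rosa_test

import (
	"context"

	"sigs.k8s.io/cluster-api-provider-aws/v2/pkg/rosa"
)

// unwrapForCLI builds the interface-typed client, then recovers the
// concrete *ocm.Client that rosa CLI helpers require.
func unwrapForCLI(ctx context.Context, retriever rosa.OCMSecretsRetriever) error {
	wrapped, err := rosa.NewWrappedOCMClientWithoutControlPlane(ctx, retriever)
	if err != nil {
		return err
	}
	ocmClient, err := rosa.ConvertToRosaOcmClient(wrapped)
	if err != nil {
		return err
	}
	_ = ocmClient // pass on to rosa CLI helpers that take *ocm.Client
	return nil
}
```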
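The utils.go hunk that follows swaps the experimental MachinePool types for the core v1beta2 ones, so call sites see only a changed return type. A caller sketch under that assumption; the function name is illustrative:

```go
package utils_test

import (
	"context"
	"fmt"

	crclient "sigs.k8s.io/controller-runtime/pkg/client"

	"sigs.k8s.io/cluster-api-provider-aws/v2/pkg/utils"
)

// printMachinePools lists the MachinePools labeled for a workload cluster;
// the items are now clusterv1.MachinePool from api/core/v1beta2.
func printMachinePools(ctx context.Context, c crclient.Client, clusterName, namespace string) error {
	pools, err := utils.GetMachinePools(ctx, c, clusterName, namespace)
	if err != nil {
		return err
	}
	for i := range pools {
		fmt.Printf("machine pool %s/%s\n", pools[i].Namespace, pools[i].Name)
	}
	return nil
}
```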
// GetMachinePools returns the MachinePools belonging to a cluster. -func GetMachinePools(ctx context.Context, client crclient.Client, clusterName string, clusterNS string) ([]expclusterv1.MachinePool, error) { - machinePoolList := expclusterv1.MachinePoolList{} +func GetMachinePools(ctx context.Context, client crclient.Client, clusterName string, clusterNS string) ([]clusterv1.MachinePool, error) { + machinePoolList := clusterv1.MachinePoolList{} listOptions := []crclient.ListOption{ crclient.InNamespace(clusterNS), crclient.MatchingLabels(map[string]string{clusterv1.ClusterNameLabel: clusterName}), } if err := client.List(ctx, &machinePoolList, listOptions...); err != nil { - return []expclusterv1.MachinePool{}, fmt.Errorf("failed to list machine pools for cluster %s: %v", clusterName, err) + return []clusterv1.MachinePool{}, fmt.Errorf("failed to list machine pools for cluster %s: %v", clusterName, err) } return machinePoolList.Items, nil diff --git a/templates/cluster-template-multitenancy-clusterclass.yaml b/templates/cluster-template-multitenancy-clusterclass.yaml index a7599b0fb0..4f76099979 100644 --- a/templates/cluster-template-multitenancy-clusterclass.yaml +++ b/templates/cluster-template-multitenancy-clusterclass.yaml @@ -247,7 +247,7 @@ spec: - name: identityRef value: kind: AWSClusterRoleIdentity - name: ${MULTI_TENANCY_NESTED_IDENTITY_NAME} + name: ${MULTI_TENANCY_NESTED_IDENTITY_NAME}-${CLUSTER_NAME} version: ${KUBERNETES_VERSION} workers: machineDeployments: @@ -277,12 +277,12 @@ spec: apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 kind: AWSClusterRoleIdentity metadata: - name: ${MULTI_TENANCY_JUMP_IDENTITY_NAME} + name: ${MULTI_TENANCY_JUMP_IDENTITY_NAME}-${CLUSTER_NAME} spec: allowedNamespaces: {} durationSeconds: 900 roleARN: ${MULTI_TENANCY_JUMP_ROLE_ARN} - sessionName: ${MULTI_TENANCY_JUMP_IDENTITY_NAME}-session + sessionName: ${MULTI_TENANCY_JUMP_IDENTITY_NAME}-${CLUSTER_NAME} sourceIdentityRef: kind: AWSClusterControllerIdentity name: default @@ -290,11 +290,11 @@ spec: apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 kind: AWSClusterRoleIdentity metadata: - name: ${MULTI_TENANCY_NESTED_IDENTITY_NAME} + name: ${MULTI_TENANCY_NESTED_IDENTITY_NAME}-${CLUSTER_NAME} spec: allowedNamespaces: {} roleARN: ${MULTI_TENANCY_NESTED_ROLE_ARN} - sessionName: ${MULTI_TENANCY_NESTED_IDENTITY_NAME}-session + sessionName: ${MULTI_TENANCY_NESTED_IDENTITY_NAME}-${CLUSTER_NAME} sourceIdentityRef: kind: AWSClusterRoleIdentity - name: ${MULTI_TENANCY_JUMP_IDENTITY_NAME} + name: ${MULTI_TENANCY_JUMP_IDENTITY_NAME}-${CLUSTER_NAME} diff --git a/templates/cluster-template-rosa-role-config.yaml b/templates/cluster-template-rosa-role-config.yaml new file mode 100644 index 0000000000..54130d1c5e --- /dev/null +++ b/templates/cluster-template-rosa-role-config.yaml @@ -0,0 +1,57 @@ +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 +kind: ROSARoleConfig +metadata: + name: "${CLUSTER_NAME}-role-config" +spec: + accountRoleConfig: + prefix: "${ACCOUNT_ROLES_PREFIX}" + version: "${OPENSHIFT_VERSION}" + operatorRoleConfig: + prefix: "${OPERATOR_ROLES_PREFIX}" + credentialsSecretRef: + name: rosa-creds-secret + oidcProviderType: Managed +--- +apiVersion: cluster.x-k8s.io/v1beta1 +kind: Cluster +metadata: + name: "${CLUSTER_NAME}" +spec: + clusterNetwork: + pods: + cidrBlocks: ["192.168.0.0/16"] + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 + kind: ROSACluster + name: "${CLUSTER_NAME}" + controlPlaneRef: + apiVersion: controlplane.cluster.x-k8s.io/v1beta2 + kind: ROSAControlPlane + name: "${CLUSTER_NAME}-control-plane" +---
+apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 +kind: ROSACluster +metadata: + name: "${CLUSTER_NAME}" +spec: {} +--- +apiVersion: controlplane.cluster.x-k8s.io/v1beta2 +kind: ROSAControlPlane +metadata: + name: "${CLUSTER_NAME}-control-plane" +spec: + rosaClusterName: ${CLUSTER_NAME:0:54} + version: "${OPENSHIFT_VERSION}" + region: "${AWS_REGION}" + network: + machineCIDR: "10.0.0.0/16" + subnets: + - "${PUBLIC_SUBNET_ID}" # remove if creating a private cluster + - "${PRIVATE_SUBNET_ID}" + availabilityZones: + - "${AWS_AVAILABILITY_ZONE}" + credentialsSecretRef: + name: rosa-creds-secret + rosaRoleConfigRef: + name: "${CLUSTER_NAME}-role-config" diff --git a/templates/rosa-network.yaml b/templates/rosa-network.yaml new file mode 100644 index 0000000000..7f34dd03bf --- /dev/null +++ b/templates/rosa-network.yaml @@ -0,0 +1,13 @@ +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 +kind: ROSANetwork +metadata: + name: "${ROSA_NETWORK_NAME}" +spec: + region: "${AWS_REGION}" + stackName: "${ROSA_NETWORK_NAME}" + availabilityZoneCount: ${AVAILABILITY_ZONE_COUNT} + cidrBlock: "${CIDR_BLOCK}" + identityRef: + kind: AWSClusterControllerIdentity + name: default diff --git a/test/e2e/data/e2e_conf.yaml b/test/e2e/data/e2e_conf.yaml index 4374126a9b..93376cea74 100644 --- a/test/e2e/data/e2e_conf.yaml +++ b/test/e2e/data/e2e_conf.yaml @@ -1,12 +1,11 @@ --- # E2E test scenario using local dev images and manifests built from the source tree for following providers: # - cluster-api -# - bootstrap kubeadm -# - control-plane kubeadm -# - aws +# - infrastructure aws # To run tests, run the following from the root of this repository. # `AWS_REGION=eu-west-1 make e2e GINKGO_ARGS=-stream E2E_ARGS=-skip-cloudformation-deletion` +# The -stream flag will make Ginkgo print results to the screen in real-time. # -skip-cloudformation-deletion reduces the time taken to set up AWS CloudFormation prior to cluster start. # AWS credentials must be present for running tests @@ -26,11 +25,11 @@ images: loadBehavior: tryLoad - name: quay.io/jetstack/cert-manager-controller:v1.17.2 loadBehavior: tryLoad - - name: registry.k8s.io/cluster-api/cluster-api-controller:v1.10.2 + - name: registry.k8s.io/cluster-api/cluster-api-controller:v1.11.1 loadBehavior: tryLoad - - name: registry.k8s.io/cluster-api/kubeadm-bootstrap-controller:v1.10.2 + - name: registry.k8s.io/cluster-api/kubeadm-bootstrap-controller:v1.11.1 loadBehavior: tryLoad - - name: registry.k8s.io/cluster-api/kubeadm-control-plane-controller:v1.10.2 + - name: registry.k8s.io/cluster-api/kubeadm-control-plane-controller:v1.11.1 loadBehavior: tryLoad providers: @@ -42,18 +41,18 @@ providers: type: "url" contract: v1beta1 files: - - sourcePath: "./shared/v1beta1/metadata.yaml" + - sourcePath: "./shared/capi/v1.2/metadata.yaml" replacements: - old: "imagePullPolicy: Always" new: "imagePullPolicy: IfNotPresent" - old: --metrics-bind-addr=127.0.0.1:8080 new: --metrics-bind-addr=:8080 - - name: v1.10.2 # latest published release in the v1beta1 series; this is used for v1beta1 --> main clusterctl upgrades test only. - value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.10.2/core-components.yaml" + - name: v1.11.1 # latest published release in the v1beta1 series; this is used for v1beta1 --> main clusterctl upgrades test only. 
+ value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.11.1/core-components.yaml" type: "url" - contract: v1beta1 + contract: v1beta2 files: - - sourcePath: "./shared/v1beta1/metadata.yaml" + - sourcePath: "./shared/capi/v1.11/metadata.yaml" replacements: - old: "imagePullPolicy: Always" new: "imagePullPolicy: IfNotPresent" @@ -62,25 +61,25 @@ providers: - name: kubeadm type: BootstrapProvider files: - - sourcePath: "./shared/v1beta1/metadata.yaml" + - sourcePath: "./shared/capi/v1.2/metadata.yaml" versions: - name: v1.2.0 # earliest published release in the v1beta1 series; this is used for v1beta1 old --> v1beta1 latest clusterctl upgrades test only. value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.2.0/bootstrap-components.yaml" type: "url" contract: v1beta1 files: - - sourcePath: "./shared/v1beta1/metadata.yaml" + - sourcePath: "./shared/capi/v1.2/metadata.yaml" replacements: - old: "imagePullPolicy: Always" new: "imagePullPolicy: IfNotPresent" - old: --metrics-bind-addr=127.0.0.1:8080 new: --metrics-bind-addr=:8080 - - name: v1.10.2 # latest published release in the v1beta1 series; this is used for v1beta1 --> main clusterctl upgrades test only. - value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.10.2/bootstrap-components.yaml" + - name: v1.11.1 # latest published release in the v1beta1 series; this is used for v1beta1 --> main clusterctl upgrades test only. + value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.11.1/bootstrap-components.yaml" type: "url" - contract: v1beta1 + contract: v1beta2 files: - - sourcePath: "./shared/v1beta1/metadata.yaml" + - sourcePath: "./shared/capi/v1.11/metadata.yaml" replacements: - old: "imagePullPolicy: Always" new: "imagePullPolicy: IfNotPresent" @@ -89,25 +88,25 @@ providers: - name: kubeadm type: ControlPlaneProvider files: - - sourcePath: "./shared/v1beta1/metadata.yaml" + - sourcePath: "./shared/capi/v1.2/metadata.yaml" versions: - name: v1.2.0 # earliest published release in the v1beta1 series; this is used for v1beta1 old --> main clusterctl upgrades test only. value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.2.0/control-plane-components.yaml" type: "url" contract: v1beta1 files: - - sourcePath: "./shared/v1beta1/metadata.yaml" + - sourcePath: "./shared/capi/v1.2/metadata.yaml" replacements: - old: "imagePullPolicy: Always" new: "imagePullPolicy: IfNotPresent" - old: --metrics-bind-addr=127.0.0.1:8080 new: --metrics-bind-addr=:8080 - - name: v1.10.2 # latest published release in the v1beta1 series; this is used for v1beta1 --> v1beta1 latest clusterctl upgrades test only. - value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.10.2/control-plane-components.yaml" + - name: v1.11.1 # latest published release in the v1beta1 series; this is used for v1beta1 --> v1beta1 latest clusterctl upgrades test only. + value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.11.1/control-plane-components.yaml" type: "url" - contract: v1beta1 + contract: v1beta2 files: - - sourcePath: "./shared/v1beta1/metadata.yaml" + - sourcePath: "./shared/capi/v1.11/metadata.yaml" replacements: - old: "imagePullPolicy: Always" new: "imagePullPolicy: IfNotPresent" @@ -136,7 +135,7 @@ providers: - name: v9.9.99 # Use manifest from source files value: ../../../config/default - # Do not add contract field for v1beta1 --> v1beta2 clusterctl upgrades test to work. 
+ contract: v1beta1 files: - sourcePath: "./infrastructure-aws/withoutclusterclass/generated/cluster-template-efs-support.yaml" - sourcePath: "./infrastructure-aws/withoutclusterclass/generated/cluster-template-external-csi.yaml" diff --git a/test/e2e/data/e2e_eks_conf.yaml b/test/e2e/data/e2e_eks_conf.yaml index fe9fca78e3..d4c51f0a8a 100644 --- a/test/e2e/data/e2e_eks_conf.yaml +++ b/test/e2e/data/e2e_eks_conf.yaml @@ -16,30 +16,32 @@ images: - name: gcr.io/k8s-staging-cluster-api/capa-manager:e2e loadBehavior: mustLoad -## PLEASE KEEP THESE UP TO DATE WITH THE COMPONENTS + ## PLEASE KEEP THESE UP TO DATE WITH THE COMPONENTS + + # Cluster API v1beta1 Preloads - name: quay.io/jetstack/cert-manager-cainjector:v1.17.2 loadBehavior: tryLoad - name: quay.io/jetstack/cert-manager-webhook:v1.17.2 loadBehavior: tryLoad - name: quay.io/jetstack/cert-manager-controller:v1.17.2 loadBehavior: tryLoad - - name: registry.k8s.io/cluster-api/cluster-api-controller:v1.10.2 + - name: registry.k8s.io/cluster-api/cluster-api-controller:v1.11.1 loadBehavior: tryLoad - - name: registry.k8s.io/cluster-api/kubeadm-bootstrap-controller:v1.10.2 + - name: registry.k8s.io/cluster-api/kubeadm-bootstrap-controller:v1.11.1 loadBehavior: tryLoad - - name: registry.k8s.io/cluster-api/kubeadm-control-plane-controller:v1.10.2 + - name: registry.k8s.io/cluster-api/kubeadm-control-plane-controller:v1.11.1 loadBehavior: tryLoad providers: - name: cluster-api type: CoreProvider versions: - - name: v1.10.2 - value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.10.2/core-components.yaml" + - name: v1.11.1 + value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.11.1/core-components.yaml" type: "url" - contract: v1beta1 + contract: v1beta2 files: - - sourcePath: "./shared/v1beta1/metadata.yaml" + - sourcePath: "./shared/capi/v1.11/metadata.yaml" replacements: - old: "imagePullPolicy: Always" new: "imagePullPolicy: IfNotPresent" @@ -48,14 +50,14 @@ providers: - name: kubeadm type: BootstrapProvider files: - - sourcePath: "./shared/v1beta1/metadata.yaml" + - sourcePath: "./shared/capi/v1.2/metadata.yaml" versions: - - name: v1.10.2 - value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.10.2/bootstrap-components.yaml" + - name: v1.11.1 + value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.11.1/bootstrap-components.yaml" type: "url" - contract: v1beta1 + contract: v1beta2 files: - - sourcePath: "./shared/v1beta1/metadata.yaml" + - sourcePath: "./shared/capi/v1.11/metadata.yaml" replacements: - old: "imagePullPolicy: Always" new: "imagePullPolicy: IfNotPresent" @@ -64,14 +66,14 @@ providers: - name: kubeadm type: ControlPlaneProvider files: - - sourcePath: "./shared/v1beta1/metadata.yaml" + - sourcePath: "./shared/capi/v1.11/metadata.yaml" versions: - - name: v1.10.2 - value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.10.2/control-plane-components.yaml" + - name: v1.11.1 + value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.11.1/control-plane-components.yaml" type: "url" - contract: v1beta1 + contract: v1beta2 files: - - sourcePath: "./shared/v1beta1/metadata.yaml" + - sourcePath: "./shared/capi/v1.11/metadata.yaml" replacements: - old: "imagePullPolicy: Always" new: "imagePullPolicy: IfNotPresent" @@ -112,10 +114,16 @@ providers: targetName: "cluster-template-eks-managedmachinepool.yaml" - sourcePath: "./eks/cluster-template-eks-ipv6-cluster.yaml" targetName: 
"cluster-template-eks-ipv6-cluster.yaml" + - sourcePath: "./eks/cluster-template-eks-upgrade-policy.yaml" + targetName: "cluster-template-eks-upgrade-policy.yaml" - sourcePath: "./eks/cluster-template-eks-control-plane-only-legacy.yaml" targetName: "cluster-template-eks-control-plane-only-legacy.yaml" - sourcePath: "./eks/cluster-template-eks-control-plane-bare-eks.yaml" targetName: "cluster-template-eks-control-plane-bare-eks.yaml" + - sourcePath: "./eks/cluster-template-eks-auth-api-and-config-map.yaml" + targetName: "cluster-template-eks-auth-api-and-config-map.yaml" + - sourcePath: "./eks/cluster-template-eks-auth-bootstrap-disabled.yaml" + targetName: "cluster-template-eks-auth-bootstrap-disabled.yaml" - sourcePath: "./infrastructure-aws/withclusterclass/kustomize_sources/eks-clusterclass/clusterclass-eks-e2e.yaml" - sourcePath: "./infrastructure-aws/withclusterclass/generated/cluster-template-eks-clusterclass.yaml" diff --git a/test/e2e/data/eks/cluster-template-eks-auth-api-and-config-map.yaml b/test/e2e/data/eks/cluster-template-eks-auth-api-and-config-map.yaml new file mode 100644 index 0000000000..d9e3c541e6 --- /dev/null +++ b/test/e2e/data/eks/cluster-template-eks-auth-api-and-config-map.yaml @@ -0,0 +1,39 @@ +--- +apiVersion: cluster.x-k8s.io/v1beta1 +kind: Cluster +metadata: + name: "${CLUSTER_NAME}" +spec: + clusterNetwork: + pods: + cidrBlocks: ["192.168.0.0/16"] + infrastructureRef: + kind: AWSManagedCluster + apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 + name: "${CLUSTER_NAME}" + controlPlaneRef: + kind: AWSManagedControlPlane + apiVersion: controlplane.cluster.x-k8s.io/v1beta2 + name: "${CLUSTER_NAME}-control-plane" +--- +kind: AWSManagedCluster +apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 +metadata: + name: "${CLUSTER_NAME}" +spec: {} +--- +kind: AWSManagedControlPlane +apiVersion: controlplane.cluster.x-k8s.io/v1beta2 +metadata: + name: "${CLUSTER_NAME}-control-plane" +spec: + region: "${AWS_REGION}" + sshKeyName: "${AWS_SSH_KEY_NAME}" + version: "${KUBERNETES_VERSION}" + accessConfig: + authenticationMode: api_and_config_map + bootstrapClusterCreatorAdminPermissions: true + identityRef: + kind: AWSClusterStaticIdentity + name: e2e-account + diff --git a/test/e2e/data/eks/cluster-template-eks-auth-bootstrap-disabled.yaml b/test/e2e/data/eks/cluster-template-eks-auth-bootstrap-disabled.yaml new file mode 100644 index 0000000000..3655f9c1dc --- /dev/null +++ b/test/e2e/data/eks/cluster-template-eks-auth-bootstrap-disabled.yaml @@ -0,0 +1,39 @@ +--- +apiVersion: cluster.x-k8s.io/v1beta1 +kind: Cluster +metadata: + name: "${CLUSTER_NAME}" +spec: + clusterNetwork: + pods: + cidrBlocks: ["192.168.0.0/16"] + infrastructureRef: + kind: AWSManagedCluster + apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 + name: "${CLUSTER_NAME}" + controlPlaneRef: + kind: AWSManagedControlPlane + apiVersion: controlplane.cluster.x-k8s.io/v1beta2 + name: "${CLUSTER_NAME}-control-plane" +--- +kind: AWSManagedCluster +apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 +metadata: + name: "${CLUSTER_NAME}" +spec: {} +--- +kind: AWSManagedControlPlane +apiVersion: controlplane.cluster.x-k8s.io/v1beta2 +metadata: + name: "${CLUSTER_NAME}-control-plane" +spec: + region: "${AWS_REGION}" + sshKeyName: "${AWS_SSH_KEY_NAME}" + version: "${KUBERNETES_VERSION}" + accessConfig: + authenticationMode: api_and_config_map + bootstrapClusterCreatorAdminPermissions: false + identityRef: + kind: AWSClusterStaticIdentity + name: e2e-account + diff --git 
a/test/e2e/data/eks/cluster-template-eks-managed-machinepool-only.yaml b/test/e2e/data/eks/cluster-template-eks-managed-machinepool-only.yaml index c296654920..df54da3371 100644 --- a/test/e2e/data/eks/cluster-template-eks-managed-machinepool-only.yaml +++ b/test/e2e/data/eks/cluster-template-eks-managed-machinepool-only.yaml @@ -25,3 +25,5 @@ spec: maxSize: 2 updateConfig: maxUnavailable: 2 + nodeRepairConfig: + enabled: false diff --git a/test/e2e/data/eks/cluster-template-eks-upgrade-policy.yaml b/test/e2e/data/eks/cluster-template-eks-upgrade-policy.yaml new file mode 100644 index 0000000000..16db15c010 --- /dev/null +++ b/test/e2e/data/eks/cluster-template-eks-upgrade-policy.yaml @@ -0,0 +1,35 @@ +--- +apiVersion: cluster.x-k8s.io/v1beta1 +kind: Cluster +metadata: + name: "${CLUSTER_NAME}" +spec: + clusterNetwork: + pods: + cidrBlocks: ["192.168.0.0/16"] + infrastructureRef: + kind: AWSManagedCluster + apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 + name: "${CLUSTER_NAME}" + controlPlaneRef: + kind: AWSManagedControlPlane + apiVersion: controlplane.cluster.x-k8s.io/v1beta2 + name: "${CLUSTER_NAME}-control-plane" +--- +kind: AWSManagedCluster +apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 +metadata: + name: "${CLUSTER_NAME}" +spec: {} +--- +kind: AWSManagedControlPlane +apiVersion: controlplane.cluster.x-k8s.io/v1beta2 +metadata: + name: "${CLUSTER_NAME}-control-plane" +spec: + region: "${AWS_REGION}" + version: "${KUBERNETES_VERSION}" + upgradePolicy: "${UPGRADE_POLICY}" + identityRef: + kind: AWSClusterStaticIdentity + name: e2e-account diff --git a/test/e2e/data/infrastructure-aws/withclusterclass/e2e_test_templates/cluster-template-nested-multitenancy-clusterclass.yaml b/test/e2e/data/infrastructure-aws/withclusterclass/e2e_test_templates/cluster-template-nested-multitenancy-clusterclass.yaml index 5195d99d84..0c852c7354 100644 --- a/test/e2e/data/infrastructure-aws/withclusterclass/e2e_test_templates/cluster-template-nested-multitenancy-clusterclass.yaml +++ b/test/e2e/data/infrastructure-aws/withclusterclass/e2e_test_templates/cluster-template-nested-multitenancy-clusterclass.yaml @@ -31,7 +31,7 @@ spec: - name: identityRef value: kind: AWSClusterRoleIdentity - name: ${MULTI_TENANCY_NESTED_IDENTITY_NAME} + name: ${MULTI_TENANCY_NESTED_IDENTITY_NAME}-${CLUSTER_NAME} version: ${KUBERNETES_VERSION} workers: machineDeployments: @@ -87,12 +87,12 @@ spec: apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 kind: AWSClusterRoleIdentity metadata: - name: ${MULTI_TENANCY_JUMP_IDENTITY_NAME} + name: ${MULTI_TENANCY_JUMP_IDENTITY_NAME}-${CLUSTER_NAME} spec: allowedNamespaces: {} durationSeconds: 900 roleARN: ${MULTI_TENANCY_JUMP_ROLE_ARN} - sessionName: ${MULTI_TENANCY_JUMP_IDENTITY_NAME}-session + sessionName: ${MULTI_TENANCY_JUMP_IDENTITY_NAME}-${CLUSTER_NAME} sourceIdentityRef: kind: AWSClusterControllerIdentity name: default @@ -100,14 +100,14 @@ spec: apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 kind: AWSClusterRoleIdentity metadata: - name: ${MULTI_TENANCY_NESTED_IDENTITY_NAME} + name: ${MULTI_TENANCY_NESTED_IDENTITY_NAME}-${CLUSTER_NAME} spec: allowedNamespaces: {} roleARN: ${MULTI_TENANCY_NESTED_ROLE_ARN} - sessionName: ${MULTI_TENANCY_NESTED_IDENTITY_NAME}-session + sessionName: ${MULTI_TENANCY_NESTED_IDENTITY_NAME}-${CLUSTER_NAME} sourceIdentityRef: kind: AWSClusterRoleIdentity - name: ${MULTI_TENANCY_JUMP_IDENTITY_NAME} + name: ${MULTI_TENANCY_JUMP_IDENTITY_NAME}-${CLUSTER_NAME} --- apiVersion: v1 data: diff --git 
a/test/e2e/data/infrastructure-aws/withclusterclass/kustomize_sources/nested-multitenancy-clusterclass/cluster-template.yaml b/test/e2e/data/infrastructure-aws/withclusterclass/kustomize_sources/nested-multitenancy-clusterclass/cluster-template.yaml index 2636c4bcc4..0dd1a6e3fe 100644 --- a/test/e2e/data/infrastructure-aws/withclusterclass/kustomize_sources/nested-multitenancy-clusterclass/cluster-template.yaml +++ b/test/e2e/data/infrastructure-aws/withclusterclass/kustomize_sources/nested-multitenancy-clusterclass/cluster-template.yaml @@ -37,7 +37,7 @@ spec: - name: identityRef value: kind: AWSClusterRoleIdentity - name: "${MULTI_TENANCY_NESTED_IDENTITY_NAME}" + name: "${MULTI_TENANCY_NESTED_IDENTITY_NAME}-${CLUSTER_NAME}" --- apiVersion: v1 data: ${CNI_RESOURCES} diff --git a/test/e2e/data/infrastructure-aws/withclusterclass/kustomize_sources/nested-multitenancy-clusterclass/role.yaml b/test/e2e/data/infrastructure-aws/withclusterclass/kustomize_sources/nested-multitenancy-clusterclass/role.yaml index 08ce72cd0b..da233b0801 100644 --- a/test/e2e/data/infrastructure-aws/withclusterclass/kustomize_sources/nested-multitenancy-clusterclass/role.yaml +++ b/test/e2e/data/infrastructure-aws/withclusterclass/kustomize_sources/nested-multitenancy-clusterclass/role.yaml @@ -2,11 +2,11 @@ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 kind: AWSClusterRoleIdentity metadata: - name: "${MULTI_TENANCY_JUMP_IDENTITY_NAME}" + name: "${MULTI_TENANCY_JUMP_IDENTITY_NAME}-${CLUSTER_NAME}" spec: roleARN: "${MULTI_TENANCY_JUMP_ROLE_ARN}" durationSeconds: 900 - sessionName: "${MULTI_TENANCY_JUMP_IDENTITY_NAME}-session" + sessionName: "${MULTI_TENANCY_JUMP_IDENTITY_NAME}-${CLUSTER_NAME}" sourceIdentityRef: kind: AWSClusterControllerIdentity name: "default" @@ -15,11 +15,11 @@ spec: apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 kind: AWSClusterRoleIdentity metadata: - name: "${MULTI_TENANCY_NESTED_IDENTITY_NAME}" + name: "${MULTI_TENANCY_NESTED_IDENTITY_NAME}-${CLUSTER_NAME}" spec: roleARN: "${MULTI_TENANCY_NESTED_ROLE_ARN}" - sessionName: "${MULTI_TENANCY_NESTED_IDENTITY_NAME}-session" + sessionName: "${MULTI_TENANCY_NESTED_IDENTITY_NAME}-${CLUSTER_NAME}" sourceIdentityRef: kind: AWSClusterRoleIdentity - name: "${MULTI_TENANCY_JUMP_IDENTITY_NAME}" + name: "${MULTI_TENANCY_JUMP_IDENTITY_NAME}-${CLUSTER_NAME}" allowedNamespaces: {} diff --git a/test/e2e/data/infrastructure-aws/withclusterclass/kustomize_sources/topology/clusterclass-ci-default.yaml b/test/e2e/data/infrastructure-aws/withclusterclass/kustomize_sources/topology/clusterclass-ci-default.yaml index f3e7c485ca..f99caeb7a5 100644 --- a/test/e2e/data/infrastructure-aws/withclusterclass/kustomize_sources/topology/clusterclass-ci-default.yaml +++ b/test/e2e/data/infrastructure-aws/withclusterclass/kustomize_sources/topology/clusterclass-ci-default.yaml @@ -5,7 +5,7 @@ metadata: spec: controlPlane: ref: - apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + apiVersion: controlplane.cluster.x-k8s.io/v1beta2 kind: KubeadmControlPlaneTemplate name: ci-default-control-plane machineInfrastructure: @@ -205,7 +205,7 @@ spec: enabledIf: '{{ eq .selfHosted "yes" }}' definitions: - selector: - apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + apiVersion: controlplane.cluster.x-k8s.io/v1beta2 kind: KubeadmControlPlaneTemplate matchResources: controlPlane: true @@ -278,7 +278,7 @@ spec: healthCheckProtocol: HTTPS --- kind: KubeadmControlPlaneTemplate -apiVersion: controlplane.cluster.x-k8s.io/v1beta1 +apiVersion: controlplane.cluster.x-k8s.io/v1beta2 
metadata: name: ci-default-control-plane spec: @@ -288,20 +288,24 @@ spec: clusterConfiguration: apiServer: extraArgs: - cloud-provider: external + - name: cloud-provider + value: external controllerManager: extraArgs: - cloud-provider: external + - name: cloud-provider + value: external initConfiguration: nodeRegistration: name: '{{ ds.meta_data.local_hostname }}' kubeletExtraArgs: - cloud-provider: external + - name: cloud-provider + value: external joinConfiguration: nodeRegistration: name: '{{ ds.meta_data.local_hostname }}' kubeletExtraArgs: - cloud-provider: external + - name: cloud-provider + value: external --- apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 kind: AWSMachineTemplate diff --git a/test/e2e/data/infrastructure-aws/withoutclusterclass/e2e_test_templates/cluster-template-nested-multitenancy.yaml b/test/e2e/data/infrastructure-aws/withoutclusterclass/e2e_test_templates/cluster-template-nested-multitenancy.yaml index cab93da437..0250457be8 100644 --- a/test/e2e/data/infrastructure-aws/withoutclusterclass/e2e_test_templates/cluster-template-nested-multitenancy.yaml +++ b/test/e2e/data/infrastructure-aws/withoutclusterclass/e2e_test_templates/cluster-template-nested-multitenancy.yaml @@ -29,7 +29,7 @@ spec: enabled: true identityRef: kind: AWSClusterRoleIdentity - name: ${MULTI_TENANCY_NESTED_IDENTITY_NAME} + name: ${MULTI_TENANCY_NESTED_IDENTITY_NAME}-${CLUSTER_NAME} network: vpc: availabilityZoneUsageLimit: 1 @@ -1165,12 +1165,12 @@ metadata: apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 kind: AWSClusterRoleIdentity metadata: - name: ${MULTI_TENANCY_JUMP_IDENTITY_NAME} + name: ${MULTI_TENANCY_JUMP_IDENTITY_NAME}-${CLUSTER_NAME} spec: allowedNamespaces: {} durationSeconds: 900 roleARN: ${MULTI_TENANCY_JUMP_ROLE_ARN} - sessionName: ${MULTI_TENANCY_JUMP_IDENTITY_NAME}-session + sessionName: ${MULTI_TENANCY_JUMP_IDENTITY_NAME}-${CLUSTER_NAME} sourceIdentityRef: kind: AWSClusterControllerIdentity name: default @@ -1178,11 +1178,11 @@ spec: apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 kind: AWSClusterRoleIdentity metadata: - name: ${MULTI_TENANCY_NESTED_IDENTITY_NAME} + name: ${MULTI_TENANCY_NESTED_IDENTITY_NAME}-${CLUSTER_NAME} spec: allowedNamespaces: {} roleARN: ${MULTI_TENANCY_NESTED_ROLE_ARN} - sessionName: ${MULTI_TENANCY_NESTED_IDENTITY_NAME}-session + sessionName: ${MULTI_TENANCY_NESTED_IDENTITY_NAME}-${CLUSTER_NAME} sourceIdentityRef: kind: AWSClusterRoleIdentity - name: ${MULTI_TENANCY_JUMP_IDENTITY_NAME} + name: ${MULTI_TENANCY_JUMP_IDENTITY_NAME}-${CLUSTER_NAME} diff --git a/test/e2e/data/infrastructure-aws/withoutclusterclass/e2e_test_templates/cluster-template-simple-multitenancy.yaml b/test/e2e/data/infrastructure-aws/withoutclusterclass/e2e_test_templates/cluster-template-simple-multitenancy.yaml index 2668643ac5..6e7744ed08 100644 --- a/test/e2e/data/infrastructure-aws/withoutclusterclass/e2e_test_templates/cluster-template-simple-multitenancy.yaml +++ b/test/e2e/data/infrastructure-aws/withoutclusterclass/e2e_test_templates/cluster-template-simple-multitenancy.yaml @@ -1160,7 +1160,7 @@ spec: allowedNamespaces: {} durationSeconds: 900 roleARN: ${MULTI_TENANCY_SIMPLE_ROLE_ARN} - sessionName: ${MULTI_TENANCY_SIMPLE_IDENTITY_NAME}-session + sessionName: ${MULTI_TENANCY_SIMPLE_IDENTITY_NAME} sourceIdentityRef: kind: AWSClusterControllerIdentity name: default diff --git a/test/e2e/data/infrastructure-aws/withoutclusterclass/kustomize_sources/nested-multitenancy/patches/role-identity.yaml 
b/test/e2e/data/infrastructure-aws/withoutclusterclass/kustomize_sources/nested-multitenancy/patches/role-identity.yaml index 3d65b79213..954f1981a0 100644 --- a/test/e2e/data/infrastructure-aws/withoutclusterclass/kustomize_sources/nested-multitenancy/patches/role-identity.yaml +++ b/test/e2e/data/infrastructure-aws/withoutclusterclass/kustomize_sources/nested-multitenancy/patches/role-identity.yaml @@ -6,4 +6,4 @@ metadata: spec: identityRef: kind: AWSClusterRoleIdentity - name: "${MULTI_TENANCY_NESTED_IDENTITY_NAME}" + name: "${MULTI_TENANCY_NESTED_IDENTITY_NAME}-${CLUSTER_NAME}" diff --git a/test/e2e/data/infrastructure-aws/withoutclusterclass/kustomize_sources/nested-multitenancy/role.yaml b/test/e2e/data/infrastructure-aws/withoutclusterclass/kustomize_sources/nested-multitenancy/role.yaml index 08ce72cd0b..da233b0801 100644 --- a/test/e2e/data/infrastructure-aws/withoutclusterclass/kustomize_sources/nested-multitenancy/role.yaml +++ b/test/e2e/data/infrastructure-aws/withoutclusterclass/kustomize_sources/nested-multitenancy/role.yaml @@ -2,11 +2,11 @@ apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 kind: AWSClusterRoleIdentity metadata: - name: "${MULTI_TENANCY_JUMP_IDENTITY_NAME}" + name: "${MULTI_TENANCY_JUMP_IDENTITY_NAME}-${CLUSTER_NAME}" spec: roleARN: "${MULTI_TENANCY_JUMP_ROLE_ARN}" durationSeconds: 900 - sessionName: "${MULTI_TENANCY_JUMP_IDENTITY_NAME}-session" + sessionName: "${MULTI_TENANCY_JUMP_IDENTITY_NAME}-${CLUSTER_NAME}" sourceIdentityRef: kind: AWSClusterControllerIdentity name: "default" @@ -15,11 +15,11 @@ spec: apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 kind: AWSClusterRoleIdentity metadata: - name: "${MULTI_TENANCY_NESTED_IDENTITY_NAME}" + name: "${MULTI_TENANCY_NESTED_IDENTITY_NAME}-${CLUSTER_NAME}" spec: roleARN: "${MULTI_TENANCY_NESTED_ROLE_ARN}" - sessionName: "${MULTI_TENANCY_NESTED_IDENTITY_NAME}-session" + sessionName: "${MULTI_TENANCY_NESTED_IDENTITY_NAME}-${CLUSTER_NAME}" sourceIdentityRef: kind: AWSClusterRoleIdentity - name: "${MULTI_TENANCY_JUMP_IDENTITY_NAME}" + name: "${MULTI_TENANCY_JUMP_IDENTITY_NAME}-${CLUSTER_NAME}" allowedNamespaces: {} diff --git a/test/e2e/data/infrastructure-aws/withoutclusterclass/kustomize_sources/simple-multitenancy/role.yaml b/test/e2e/data/infrastructure-aws/withoutclusterclass/kustomize_sources/simple-multitenancy/role.yaml index 59fa15561e..27f6bc9d3d 100644 --- a/test/e2e/data/infrastructure-aws/withoutclusterclass/kustomize_sources/simple-multitenancy/role.yaml +++ b/test/e2e/data/infrastructure-aws/withoutclusterclass/kustomize_sources/simple-multitenancy/role.yaml @@ -6,7 +6,7 @@ metadata: spec: roleARN: "${MULTI_TENANCY_SIMPLE_ROLE_ARN}" durationSeconds: 900 - sessionName: "${MULTI_TENANCY_SIMPLE_IDENTITY_NAME}-session" + sessionName: "${MULTI_TENANCY_SIMPLE_IDENTITY_NAME}" sourceIdentityRef: kind: AWSClusterControllerIdentity name: "default" diff --git a/test/e2e/data/shared/capi/v1.11/metadata.yaml b/test/e2e/data/shared/capi/v1.11/metadata.yaml new file mode 100644 index 0000000000..4d94191c95 --- /dev/null +++ b/test/e2e/data/shared/capi/v1.11/metadata.yaml @@ -0,0 +1,44 @@ +# maps release series of major.minor to cluster-api contract version +# the contract version may change between minor or major versions, but *not* +# between patch versions. 
+# +# update this file only when a new major or minor version is released +apiVersion: clusterctl.cluster.x-k8s.io/v1alpha3 +kind: Metadata +releaseSeries: + - major: 1 + minor: 11 + contract: v1beta2 + - major: 1 + minor: 10 + contract: v1beta1 + - major: 1 + minor: 9 + contract: v1beta1 + - major: 1 + minor: 8 + contract: v1beta1 + - major: 1 + minor: 7 + contract: v1beta1 + - major: 1 + minor: 6 + contract: v1beta1 + - major: 1 + minor: 5 + contract: v1beta1 + - major: 1 + minor: 4 + contract: v1beta1 + - major: 1 + minor: 3 + contract: v1beta1 + - major: 1 + minor: 2 + contract: v1beta1 + - major: 1 + minor: 1 + contract: v1beta1 + - major: 1 + minor: 0 + contract: v1beta1 diff --git a/test/e2e/data/shared/v1beta1/metadata.yaml b/test/e2e/data/shared/capi/v1.2/metadata.yaml similarity index 95% rename from test/e2e/data/shared/v1beta1/metadata.yaml rename to test/e2e/data/shared/capi/v1.2/metadata.yaml index 6f9155ed12..1bda6299c7 100644 --- a/test/e2e/data/shared/v1beta1/metadata.yaml +++ b/test/e2e/data/shared/capi/v1.2/metadata.yaml @@ -6,6 +6,9 @@ apiVersion: clusterctl.cluster.x-k8s.io/v1alpha3 kind: Metadata releaseSeries: + - major: 1 + minor: 11 + contract: v1beta1 - major: 1 minor: 10 contract: v1beta1 diff --git a/test/e2e/data/shared/v1beta2_provider/metadata.yaml b/test/e2e/data/shared/v1beta2_provider/metadata.yaml index 6355e900f4..7102f84b3b 100644 --- a/test/e2e/data/shared/v1beta2_provider/metadata.yaml +++ b/test/e2e/data/shared/v1beta2_provider/metadata.yaml @@ -63,6 +63,15 @@ releaseSeries: - major: 2 minor: 7 contract: v1beta1 + - major: 2 + minor: 8 + contract: v1beta1 + - major: 2 + minor: 9 + contract: v1beta1 + - major: 2 + minor: 10 + contract: v1beta1 - major: 9 minor: 9 contract: v1beta1 diff --git a/test/e2e/shared/aws.go b/test/e2e/shared/aws.go index 8288646156..ae5723b6f7 100644 --- a/test/e2e/shared/aws.go +++ b/test/e2e/shared/aws.go @@ -764,7 +764,7 @@ func ensureTestImageUploaded(ctx context.Context, e2eCtx *E2EContext) error { return err } - cmd := exec.Command("docker", "inspect", "--format='{{index .Id}}'", "gcr.io/k8s-staging-cluster-api/capa-manager:e2e") + cmd := exec.CommandContext(ctx, "docker", "inspect", "--format='{{index .Id}}'", "gcr.io/k8s-staging-cluster-api/capa-manager:e2e") var stdOut bytes.Buffer cmd.Stdout = &stdOut err := cmd.Run() @@ -775,7 +775,7 @@ func ensureTestImageUploaded(ctx context.Context, e2eCtx *E2EContext) error { imageSha := strings.ReplaceAll(strings.TrimSuffix(stdOut.String(), "\n"), "'", "") ecrImageName := repoName + ":e2e" - cmd = exec.Command("docker", "tag", imageSha, ecrImageName) //nolint:gosec + cmd = exec.CommandContext(ctx, "docker", "tag", imageSha, ecrImageName) //nolint:gosec err = cmd.Run() if err != nil { return err @@ -794,13 +794,13 @@ func ensureTestImageUploaded(ctx context.Context, e2eCtx *E2EContext) error { return errors.New("failed to decode ECR authentication token") } - cmd = exec.Command("docker", "login", "--username", strList[0], "--password", strList[1], "public.ecr.aws") //nolint:gosec + cmd = exec.CommandContext(ctx, "docker", "login", "--username", strList[0], "--password", strList[1], "public.ecr.aws") //nolint:gosec err = cmd.Run() if err != nil { return err } - cmd = exec.Command("docker", "push", ecrImageName) + cmd = exec.CommandContext(ctx, "docker", "push", ecrImageName) err = cmd.Run() if err != nil { return err @@ -919,6 +919,7 @@ func DumpCloudTrailEvents(e2eCtx *E2EContext) { page, err := paginator.NextPage(context.TODO()) if err != nil { fmt.Fprintf(GinkgoWriter, 
"Couldn't get AWS CloudTrail events: err=%v\n", err) + break } events = append(events, page.Events...) } diff --git a/test/e2e/shared/common.go b/test/e2e/shared/common.go index 53bce01ae4..5c95a9f348 100644 --- a/test/e2e/shared/common.go +++ b/test/e2e/shared/common.go @@ -37,7 +37,7 @@ import ( crclient "sigs.k8s.io/controller-runtime/pkg/client" infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/cluster-api/test/framework" "sigs.k8s.io/cluster-api/test/framework/clusterctl" "sigs.k8s.io/cluster-api/util" @@ -312,7 +312,11 @@ func postProcessBase64LogData(dir, src, dst string) error { } // Extract second line which contains the data (first line contains the command) - inputStringData := strings.Split(string(inputData), "\n")[1] + inputDataLines := strings.Split(string(inputData), "\n") + if len(inputDataLines) < 2 { + return errors.Errorf("source file %q does not contain expected data (need at least 2 lines, got %d), input data content: %q", sourceFile, len(inputDataLines), string(inputData)) + } + inputStringData := inputDataLines[1] // Trim spaces and the $ suffix. inputStringData = strings.TrimSpace(inputStringData) diff --git a/test/e2e/shared/defaults.go b/test/e2e/shared/defaults.go index 4b7adf22c6..3a9b00d2a5 100644 --- a/test/e2e/shared/defaults.go +++ b/test/e2e/shared/defaults.go @@ -30,6 +30,9 @@ import ( cgscheme "k8s.io/client-go/kubernetes/scheme" infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" + bootstrapv1beta1 "sigs.k8s.io/cluster-api/api/bootstrap/kubeadm/v1beta1" + controlplanev1beta1 "sigs.k8s.io/cluster-api/api/controlplane/kubeadm/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" "sigs.k8s.io/cluster-api/test/framework" ) @@ -69,6 +72,7 @@ const ( MultiTenancy = "MULTI_TENANCY_" EksUpgradeFromVersion = "UPGRADE_FROM_VERSION" EksUpgradeToVersion = "UPGRADE_TO_VERSION" + UpgradePolicy = "UPGRADE_POLICY" ClassicElbTestKubernetesFrom = "CLASSICELB_TEST_KUBERNETES_VERSION_FROM" ClassicElbTestKubernetesTo = "CLASSICELB_TEST_KUBERNETES_VERSION_TO" @@ -220,6 +224,12 @@ func getLimitedResources() map[string]*ServiceQuota { func DefaultScheme() *runtime.Scheme { sc := runtime.NewScheme() framework.TryAddDefaultSchemes(sc) + + // Temporary add v1beta1 scheme as long as the e2e tests use v1beta1 templates + _ = clusterv1beta1.AddToScheme(sc) + _ = bootstrapv1beta1.AddToScheme(sc) + _ = controlplanev1beta1.AddToScheme(sc) + _ = infrav1.AddToScheme(sc) _ = cgscheme.AddToScheme(sc) return sc diff --git a/test/e2e/shared/suite.go b/test/e2e/shared/suite.go index a5baf7ef48..2192a1d9b1 100644 --- a/test/e2e/shared/suite.go +++ b/test/e2e/shared/suite.go @@ -89,7 +89,7 @@ func Node1BeforeSuite(e2eCtx *E2EContext) []byte { templateDir := path.Join(e2eCtx.Settings.ArtifactFolder, "templates") newTemplatePath := templateDir + "/" + ciTemplateForUpgradeName - err = exec.Command("cp", ciTemplateForUpgradePath, newTemplatePath).Run() //nolint:gosec + err = exec.CommandContext(context.TODO(), "cp", ciTemplateForUpgradePath, newTemplatePath).Run() //nolint:gosec Expect(err).NotTo(HaveOccurred()) clusterctlCITemplateForUpgrade := clusterctl.Files{ diff --git a/test/e2e/suites/managed/control_plane_helpers.go b/test/e2e/suites/managed/control_plane_helpers.go index 0178236d32..f9ecf73f62 100644 --- a/test/e2e/suites/managed/control_plane_helpers.go +++ b/test/e2e/suites/managed/control_plane_helpers.go @@ -33,7 +33,7 @@ import ( 
crclient "sigs.k8s.io/controller-runtime/pkg/client" ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/cluster-api/test/framework" clusterctl "sigs.k8s.io/cluster-api/test/framework/clusterctl" ) diff --git a/test/e2e/suites/managed/eks_auth_test.go b/test/e2e/suites/managed/eks_auth_test.go new file mode 100644 index 0000000000..af59c31a2c --- /dev/null +++ b/test/e2e/suites/managed/eks_auth_test.go @@ -0,0 +1,180 @@ +//go:build e2e +// +build e2e + +/* +Copyright 2025 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package managed + +import ( + "context" + "fmt" + + ekstypes "github.com/aws/aws-sdk-go-v2/service/eks/types" + "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + + ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2" + "sigs.k8s.io/cluster-api-provider-aws/v2/test/e2e/shared" + "sigs.k8s.io/cluster-api/test/framework" + "sigs.k8s.io/cluster-api/util" +) + +// EKS authentication mode e2e tests. +var _ = ginkgo.Describe("[managed] [auth] EKS authentication mode tests", func() { + var ( + namespace *corev1.Namespace + ctx context.Context + specName = "auth" + clusterName string + ) + + shared.ConditionalIt(runGeneralTests, "should create a cluster with api_and_config_map authentication mode", func() { + ginkgo.By("should have a valid test configuration") + Expect(e2eCtx.Environment.BootstrapClusterProxy).ToNot(BeNil(), "Invalid argument. BootstrapClusterProxy can't be nil") + Expect(e2eCtx.E2EConfig).ToNot(BeNil(), "Invalid argument. 
e2eConfig can't be nil when calling %s spec", specName) + Expect(e2eCtx.E2EConfig.Variables).To(HaveKey(shared.KubernetesVersion)) + + ctx = context.TODO() + namespace = shared.SetupSpecNamespace(ctx, specName, e2eCtx) + clusterName = fmt.Sprintf("%s-%s", specName, util.RandomString(6)) + eksClusterName := getEKSClusterName(namespace.Name, clusterName) + + ginkgo.By("should create an EKS control plane with api_and_config_map authentication mode") + ManagedClusterSpec(ctx, func() ManagedClusterSpecInput { + return ManagedClusterSpecInput{ + E2EConfig: e2eCtx.E2EConfig, + ConfigClusterFn: defaultConfigCluster, + BootstrapClusterProxy: e2eCtx.Environment.BootstrapClusterProxy, + AWSSession: e2eCtx.BootstrapUserAWSSession, + Namespace: namespace, + ClusterName: clusterName, + Flavour: EKSAuthAPIAndConfigMapFlavor, + ControlPlaneMachineCount: 1, + WorkerMachineCount: 0, + } + }) + + ginkgo.By("EKS cluster should be active") + verifyClusterActiveAndOwned(ctx, eksClusterName, e2eCtx.BootstrapUserAWSSession) + + ginkgo.By("verifying cluster has the correct authentication mode") + verifyClusterAuthenticationMode(ctx, eksClusterName, ekstypes.AuthenticationModeApiAndConfigMap, e2eCtx.BootstrapUserAWSSession) + + ginkgo.By("attempting to downgrade from api_and_config_map to config_map should fail") + controlPlaneName := fmt.Sprintf("%s-control-plane", clusterName) + controlPlane := &ekscontrolplanev1.AWSManagedControlPlane{} + err := e2eCtx.Environment.BootstrapClusterProxy.GetClient().Get(ctx, client.ObjectKey{ + Namespace: namespace.Name, + Name: controlPlaneName, + }, controlPlane) + Expect(err).ToNot(HaveOccurred(), "failed to get control plane") + + controlPlane.Spec.AccessConfig.AuthenticationMode = ekscontrolplanev1.EKSAuthenticationModeConfigMap + err = e2eCtx.Environment.BootstrapClusterProxy.GetClient().Update(ctx, controlPlane) + Expect(err).To(HaveOccurred(), "expected downgrade from api_and_config_map to config_map to fail") + + ginkgo.By("upgrading from api_and_config_map to api should succeed") + err = e2eCtx.Environment.BootstrapClusterProxy.GetClient().Get(ctx, client.ObjectKey{ + Namespace: namespace.Name, + Name: controlPlaneName, + }, controlPlane) + Expect(err).ToNot(HaveOccurred(), "failed to get control plane for upgrade") + + controlPlane.Spec.AccessConfig.AuthenticationMode = ekscontrolplanev1.EKSAuthenticationModeAPI + err = e2eCtx.Environment.BootstrapClusterProxy.GetClient().Update(ctx, controlPlane) + Expect(err).ToNot(HaveOccurred(), "expected upgrade from api_and_config_map to api to succeed") + + ginkgo.By("attempting to downgrade from api to api_and_config_map should fail") + err = e2eCtx.Environment.BootstrapClusterProxy.GetClient().Get(ctx, client.ObjectKey{ + Namespace: namespace.Name, + Name: controlPlaneName, + }, controlPlane) + Expect(err).ToNot(HaveOccurred(), "failed to get control plane for downgrade attempt") + + controlPlane.Spec.AccessConfig.AuthenticationMode = ekscontrolplanev1.EKSAuthenticationModeAPIAndConfigMap + err = e2eCtx.Environment.BootstrapClusterProxy.GetClient().Update(ctx, controlPlane) + Expect(err).To(HaveOccurred(), "expected downgrade from api to api_and_config_map to fail") + + cluster := framework.GetClusterByName(ctx, framework.GetClusterByNameInput{ + Getter: e2eCtx.Environment.BootstrapClusterProxy.GetClient(), + Namespace: namespace.Name, + Name: clusterName, + }) + Expect(cluster).NotTo(BeNil(), "couldn't find CAPI cluster") + + framework.DeleteCluster(ctx, framework.DeleteClusterInput{ + Deleter: 
e2eCtx.Environment.BootstrapClusterProxy.GetClient(), + Cluster: cluster, + }) + framework.WaitForClusterDeleted(ctx, framework.WaitForClusterDeletedInput{ + ClusterProxy: e2eCtx.Environment.BootstrapClusterProxy, + Cluster: cluster, + ClusterctlConfigPath: e2eCtx.Environment.ClusterctlConfigPath, + ArtifactFolder: e2eCtx.Settings.ArtifactFolder, + }, e2eCtx.E2EConfig.GetIntervals("", "wait-delete-cluster")...) + }) + + shared.ConditionalIt(runGeneralTests, "should create a cluster with bootstrapClusterCreatorAdminPermissions disabled", func() { + ginkgo.By("should have a valid test configuration") + Expect(e2eCtx.Environment.BootstrapClusterProxy).ToNot(BeNil(), "Invalid argument. BootstrapClusterProxy can't be nil") + Expect(e2eCtx.E2EConfig).ToNot(BeNil(), "Invalid argument. e2eConfig can't be nil when calling bootstrap spec") + Expect(e2eCtx.E2EConfig.Variables).To(HaveKey(shared.KubernetesVersion)) + + ctx = context.TODO() + namespace = shared.SetupSpecNamespace(ctx, "bootstrap", e2eCtx) + clusterName = fmt.Sprintf("bootstrap-%s", util.RandomString(6)) + eksClusterName := getEKSClusterName(namespace.Name, clusterName) + + ginkgo.By("should create an EKS control plane with bootstrapClusterCreatorAdminPermissions disabled") + ManagedClusterSpec(ctx, func() ManagedClusterSpecInput { + return ManagedClusterSpecInput{ + E2EConfig: e2eCtx.E2EConfig, + ConfigClusterFn: defaultConfigCluster, + BootstrapClusterProxy: e2eCtx.Environment.BootstrapClusterProxy, + AWSSession: e2eCtx.BootstrapUserAWSSession, + Namespace: namespace, + ClusterName: clusterName, + Flavour: EKSAuthBootstrapDisabledFlavor, + ControlPlaneMachineCount: 1, + WorkerMachineCount: 0, + } + }) + + ginkgo.By("EKS cluster should be active") + verifyClusterActiveAndOwned(ctx, eksClusterName, e2eCtx.BootstrapUserAWSSession) + + cluster := framework.GetClusterByName(ctx, framework.GetClusterByNameInput{ + Getter: e2eCtx.Environment.BootstrapClusterProxy.GetClient(), + Namespace: namespace.Name, + Name: clusterName, + }) + Expect(cluster).NotTo(BeNil(), "couldn't find CAPI cluster") + + framework.DeleteCluster(ctx, framework.DeleteClusterInput{ + Deleter: e2eCtx.Environment.BootstrapClusterProxy.GetClient(), + Cluster: cluster, + }) + framework.WaitForClusterDeleted(ctx, framework.WaitForClusterDeletedInput{ + ClusterProxy: e2eCtx.Environment.BootstrapClusterProxy, + Cluster: cluster, + ClusterctlConfigPath: e2eCtx.Environment.ClusterctlConfigPath, + ArtifactFolder: e2eCtx.Settings.ArtifactFolder, + }, e2eCtx.E2EConfig.GetIntervals("", "wait-delete-cluster")...) + }) +}) diff --git a/test/e2e/suites/managed/eks_upgrade_policy_test.go b/test/e2e/suites/managed/eks_upgrade_policy_test.go new file mode 100644 index 0000000000..16208f4b90 --- /dev/null +++ b/test/e2e/suites/managed/eks_upgrade_policy_test.go @@ -0,0 +1,137 @@ +//go:build e2e +// +build e2e + +/* +Copyright 2025 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package managed + +import ( + "context" + "fmt" + "time" + + "github.com/aws/aws-sdk-go-v2/aws" + ekstypes "github.com/aws/aws-sdk-go-v2/service/eks/types" + "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + + ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2" + "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/awserrors" + "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/converters" + "sigs.k8s.io/cluster-api-provider-aws/v2/test/e2e/shared" + "sigs.k8s.io/cluster-api/test/framework" + "sigs.k8s.io/cluster-api/util" +) + +// EKS upgrade policy test. +var _ = ginkgo.Describe("EKS upgrade policy test", func() { + var ( + namespace *corev1.Namespace + ctx context.Context + specName = "cluster" + clusterName string + ) + + ginkgo.It("[managed] [upgrade-policy] Able to update cluster upgrade policy from STANDARD to EXTENDED", func() { + ginkgo.By("should have a valid test configuration") + Expect(e2eCtx.Environment.BootstrapClusterProxy).ToNot(BeNil(), "Invalid argument. BootstrapClusterProxy can't be nil") + Expect(e2eCtx.E2EConfig).ToNot(BeNil(), "Invalid argument. e2eConfig can't be nil when calling %s spec", specName) + + upgradePolicy := ekscontrolplanev1.UpgradePolicyStandard + shared.SetEnvVar(shared.UpgradePolicy, upgradePolicy.String(), false) + + ctx = context.TODO() + namespace = shared.SetupSpecNamespace(ctx, specName, e2eCtx) + clusterName = fmt.Sprintf("%s-%s", specName, util.RandomString(6)) + eksClusterName := getEKSClusterName(namespace.Name, clusterName) + + ginkgo.By("default iam role should exist") + VerifyRoleExistsAndOwned(ctx, ekscontrolplanev1.DefaultEKSControlPlaneRole, eksClusterName, false, e2eCtx.AWSSession) + + getManagedClusterSpec := func() ManagedClusterSpecInput { + return ManagedClusterSpecInput{ + E2EConfig: e2eCtx.E2EConfig, + ConfigClusterFn: defaultConfigCluster, + BootstrapClusterProxy: e2eCtx.Environment.BootstrapClusterProxy, + AWSSession: e2eCtx.BootstrapUserAWSSession, + Namespace: namespace, + ClusterName: clusterName, + Flavour: EKSUpgradePolicyFlavor, + ControlPlaneMachineCount: 1, // NOTE: this cannot be zero as clusterctl returns an error + WorkerMachineCount: 0, + } + } + + ginkgo.By("should create an EKS control plane") + ManagedClusterSpec(ctx, getManagedClusterSpec) + + ginkgo.By(fmt.Sprintf("getting cluster with name %s", clusterName)) + cluster := framework.GetClusterByName(ctx, framework.GetClusterByNameInput{ + Getter: e2eCtx.Environment.BootstrapClusterProxy.GetClient(), + Namespace: namespace.Name, + Name: clusterName, + }) + Expect(cluster).NotTo(BeNil(), "couldn't find cluster") + + WaitForEKSClusterUpgradePolicy(ctx, e2eCtx.BootstrapUserAWSSession, eksClusterName, upgradePolicy) + + changedUpgradePolicy := ekscontrolplanev1.UpgradePolicyExtended + ginkgo.By(fmt.Sprintf("Changing the UpgradePolicy from %s to %s", upgradePolicy, changedUpgradePolicy)) + shared.SetEnvVar(shared.UpgradePolicy, changedUpgradePolicy.String(), false) + ManagedClusterSpec(ctx, getManagedClusterSpec) + WaitForEKSClusterUpgradePolicy(ctx, e2eCtx.BootstrapUserAWSSession, eksClusterName, changedUpgradePolicy) + + framework.DeleteCluster(ctx, framework.DeleteClusterInput{ + Deleter: e2eCtx.Environment.BootstrapClusterProxy.GetClient(), + Cluster: cluster, + }) + framework.WaitForClusterDeleted(ctx, framework.WaitForClusterDeletedInput{ + ClusterProxy: e2eCtx.Environment.BootstrapClusterProxy, + Cluster: cluster, + ClusterctlConfigPath: e2eCtx.Environment.ClusterctlConfigPath, + 
ArtifactFolder: e2eCtx.Settings.ArtifactFolder, + }, e2eCtx.E2EConfig.GetIntervals("", "wait-delete-cluster")...) + }) +}) + +// WaitForEKSClusterUpgradePolicy waits until the EKS control plane reports the expected upgrade policy. +func WaitForEKSClusterUpgradePolicy(ctx context.Context, sess *aws.Config, eksClusterName string, upgradePolicy ekscontrolplanev1.UpgradePolicy) { + ginkgo.By(fmt.Sprintf("Checking EKS control plane upgrade policy matches %s", upgradePolicy)) + Eventually(func() error { + cluster, err := getEKSCluster(ctx, eksClusterName, sess) + if err != nil { + smithyErr := awserrors.ParseSmithyError(err) + notFoundErr := &ekstypes.ResourceNotFoundException{} + if smithyErr.ErrorCode() == notFoundErr.ErrorCode() { + // Unrecoverable error; stop trying and fail early. + return StopTrying(fmt.Sprintf("unrecoverable error: cluster %q not found: %s", eksClusterName, smithyErr.ErrorMessage())) + } + return err // For transient errors, retry + } + + expectedPolicy := converters.SupportTypeToSDK(upgradePolicy) + actualPolicy := cluster.UpgradePolicy.SupportType + + if actualPolicy != expectedPolicy { + // The upgrade policy change hasn't been reflected in EKS yet; return an error and retry. + return fmt.Errorf("upgrade policy mismatch: expected %s, but found %s", expectedPolicy, actualPolicy) + } + + // Success: the change has been reflected in EKS. + return nil + }, 5*time.Minute, 10*time.Second).Should(Succeed(), fmt.Sprintf("eventually failed checking EKS Cluster %q upgrade policy is %s", eksClusterName, upgradePolicy)) +} diff --git a/test/e2e/suites/managed/gc_test.go b/test/e2e/suites/managed/gc_test.go index f531caa3c0..46a4a30959 100644 --- a/test/e2e/suites/managed/gc_test.go +++ b/test/e2e/suites/managed/gc_test.go @@ -83,7 +83,7 @@ var _ = ginkgo.Describe("[managed] [gc] EKS Cluster external resource GC tests", ginkgo.By("getting AWSManagedControlPlane") cp := GetControlPlaneByName(ctx, GetControlPlaneByNameInput{ Getter: e2eCtx.Environment.BootstrapClusterProxy.GetClient(), - Namespace: cluster.Spec.InfrastructureRef.Namespace, + Namespace: cluster.Namespace, Name: cluster.Spec.ControlPlaneRef.Name, }) @@ -206,7 +206,7 @@ var _ = ginkgo.Describe("[managed] [gc] EKS Cluster external resource GC tests", ginkgo.By("getting AWSManagedControlPlane") cp := GetControlPlaneByName(ctx, GetControlPlaneByNameInput{ Getter: e2eCtx.Environment.BootstrapClusterProxy.GetClient(), - Namespace: cluster.Spec.InfrastructureRef.Namespace, + Namespace: cluster.Namespace, Name: cluster.Spec.ControlPlaneRef.Name, }) diff --git a/test/e2e/suites/managed/helpers.go b/test/e2e/suites/managed/helpers.go index 2922c70201..dce878d89a 100644 --- a/test/e2e/suites/managed/helpers.go +++ b/test/e2e/suites/managed/helpers.go @@ -48,8 +48,11 @@ const ( EKSManagedMachinePoolWithLaunchTemplateOnlyFlavor = "eks-managed-machinepool-with-launch-template-only" EKSMachinePoolOnlyFlavor = "eks-machinepool-only" EKSIPv6ClusterFlavor = "eks-ipv6-cluster" + EKSUpgradePolicyFlavor = "eks-upgrade-policy" EKSControlPlaneOnlyLegacyFlavor = "eks-control-plane-only-legacy" EKSClusterClassFlavor = "eks-clusterclass" + EKSAuthAPIAndConfigMapFlavor = "eks-auth-api-and-config-map" + EKSAuthBootstrapDisabledFlavor = "eks-auth-bootstrap-disabled" ) const ( @@ -103,9 +106,28 @@ func getEKSCluster(ctx context.Context, eksClusterName string, sess *aws.Config) } result, err := eksClient.DescribeCluster(ctx, input) + if err != nil { + return nil, err + } + return result.Cluster, err } +func verifyClusterAuthenticationMode(ctx context.Context, eksClusterName string, expectedAuthMode ekstypes.AuthenticationMode, sess *aws.Config) { +
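// Fetch the EKS cluster with retries, tolerating transient AWS API errors; + // the assertions below then run against the last successfully fetched cluster. +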
var ( + cluster *ekstypes.Cluster + err error + ) + Eventually(func() error { + cluster, err = getEKSCluster(ctx, eksClusterName, sess) + return err + }, clientRequestTimeout, clientRequestCheckInterval).Should(Succeed(), fmt.Sprintf("eventually failed trying to get EKS Cluster %q", eksClusterName)) + + Expect(cluster.AccessConfig).ToNot(BeNil(), "expecting AccessConfig to be set on the cluster") + Expect(cluster.AccessConfig.AuthenticationMode).To(BeEquivalentTo(expectedAuthMode), + fmt.Sprintf("expecting authentication mode to be %s, got %s", expectedAuthMode, cluster.AccessConfig.AuthenticationMode)) +} + func getEKSClusterAddon(ctx context.Context, eksClusterName, addonName string, sess *aws.Config) (*ekstypes.Addon, error) { eksClient := eks.NewFromConfig(*sess) diff --git a/test/e2e/suites/managed/machine_deployment.go b/test/e2e/suites/managed/machine_deployment.go index 4ef19a0f8d..11b3a74b86 100644 --- a/test/e2e/suites/managed/machine_deployment.go +++ b/test/e2e/suites/managed/machine_deployment.go @@ -30,7 +30,7 @@ import ( "k8s.io/utils/ptr" "sigs.k8s.io/cluster-api-provider-aws/v2/test/e2e/shared" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/cluster-api/test/framework" "sigs.k8s.io/cluster-api/test/framework/clusterctl" ) diff --git a/test/e2e/suites/managed/machine_deployment_helpers.go b/test/e2e/suites/managed/machine_deployment_helpers.go index e156b4ac51..be31599e37 100644 --- a/test/e2e/suites/managed/machine_deployment_helpers.go +++ b/test/e2e/suites/managed/machine_deployment_helpers.go @@ -28,7 +28,7 @@ import ( apierrors "k8s.io/apimachinery/pkg/api/errors" "sigs.k8s.io/controller-runtime/pkg/client" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/cluster-api/test/framework" ) diff --git a/test/e2e/suites/managed/machine_pool_helpers.go b/test/e2e/suites/managed/machine_pool_helpers.go index b34eb7b1b8..1c0b595118 100644 --- a/test/e2e/suites/managed/machine_pool_helpers.go +++ b/test/e2e/suites/managed/machine_pool_helpers.go @@ -28,12 +28,12 @@ import ( apierrors "k8s.io/apimachinery/pkg/api/errors" "sigs.k8s.io/controller-runtime/pkg/client" - expclusterv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/cluster-api/test/framework" ) type deleteMachinePoolInput struct { - MachinePool *expclusterv1.MachinePool + MachinePool *clusterv1.MachinePool Deleter framework.Deleter } @@ -43,14 +43,14 @@ func deleteMachinePool(ctx context.Context, input deleteMachinePoolInput) { } type waitForMachinePoolDeletedInput struct { - MachinePool *expclusterv1.MachinePool + MachinePool *clusterv1.MachinePool Getter framework.Getter } func waitForMachinePoolDeleted(ctx context.Context, input waitForMachinePoolDeletedInput, intervals ...interface{}) { By(fmt.Sprintf("Waiting for machine pool %s to be deleted", input.MachinePool.GetName())) Eventually(func() bool { - mp := &expclusterv1.MachinePool{} + mp := &clusterv1.MachinePool{} key := client.ObjectKey{ Namespace: input.MachinePool.GetNamespace(), Name: input.MachinePool.GetName(), diff --git a/test/e2e/suites/managed/managed_suite_test.go b/test/e2e/suites/managed/managed_suite_test.go index 15fc0d0b81..2371dfda53 100644 --- a/test/e2e/suites/managed/managed_suite_test.go +++ b/test/e2e/suites/managed/managed_suite_test.go @@ -32,8 +32,7 @@ import ( ekscontrolplanev1 
"sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2" expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/test/e2e/shared" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - expclusterv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" ) var ( @@ -90,7 +89,6 @@ func initScheme() *runtime.Scheme { _ = expinfrav1.AddToScheme(sc) _ = clusterv1.AddToScheme(sc) _ = ekscontrolplanev1.AddToScheme(sc) - _ = expclusterv1.AddToScheme(sc) return sc } diff --git a/test/e2e/suites/unmanaged/helpers_test.go b/test/e2e/suites/unmanaged/helpers_test.go index d8c626f079..6c482bf32f 100644 --- a/test/e2e/suites/unmanaged/helpers_test.go +++ b/test/e2e/suites/unmanaged/helpers_test.go @@ -51,12 +51,13 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/awserrors" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/utils" "sigs.k8s.io/cluster-api-provider-aws/v2/test/e2e/shared" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1" - controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1" + bootstrapv1 "sigs.k8s.io/cluster-api/api/bootstrap/kubeadm/v1beta1" + controlplanev1 "sigs.k8s.io/cluster-api/api/controlplane/kubeadm/v1beta2" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/cluster-api/test/framework" "sigs.k8s.io/cluster-api/test/framework/clusterctl" - "sigs.k8s.io/cluster-api/util/conditions" + v1beta1conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" ) // GetClusterByName returns a Cluster object given his name. @@ -295,26 +296,24 @@ func makeMachineDeployment(namespace, mdName, clusterName string, az *string, re Spec: clusterv1.MachineSpec{ ClusterName: clusterName, Bootstrap: clusterv1.Bootstrap{ - ConfigRef: &corev1.ObjectReference{ - Kind: "KubeadmConfigTemplate", - APIVersion: bootstrapv1.GroupVersion.String(), - Name: mdName, - Namespace: namespace, + ConfigRef: clusterv1.ContractVersionedObjectReference{ + Kind: "KubeadmConfigTemplate", + APIGroup: bootstrapv1.GroupVersion.Group, + Name: mdName, }, }, - InfrastructureRef: corev1.ObjectReference{ - Kind: "AWSMachineTemplate", - APIVersion: infrav1.GroupVersion.String(), - Name: mdName, - Namespace: namespace, + InfrastructureRef: clusterv1.ContractVersionedObjectReference{ + Kind: "AWSMachineTemplate", + APIGroup: infrav1.GroupVersion.Group, + Name: mdName, }, - Version: ptr.To[string](e2eCtx.E2EConfig.MustGetVariable(shared.KubernetesVersion)), + Version: e2eCtx.E2EConfig.MustGetVariable(shared.KubernetesVersion), }, }, }, } if az != nil { - machineDeployment.Spec.Template.Spec.FailureDomain = az + machineDeployment.Spec.Template.Spec.FailureDomain = *az } return machineDeployment } @@ -413,9 +412,9 @@ func LatestCIReleaseForVersion(searchVersion string) (string, error) { } type conditionAssertion struct { - conditionType clusterv1.ConditionType + conditionType clusterv1beta1.ConditionType status corev1.ConditionStatus - severity clusterv1.ConditionSeverity + severity clusterv1beta1.ConditionSeverity reason string } @@ -424,7 +423,7 @@ func hasAWSClusterConditions(m *infrav1.AWSCluster, expected []conditionAssertio return false } for _, c := range expected { - actual := conditions.Get(m, c.conditionType) + actual := v1beta1conditions.Get(m, c.conditionType) if actual == nil { return false } diff --git 
a/test/e2e/suites/unmanaged/unmanaged_CAPI_clusterclass_test.go b/test/e2e/suites/unmanaged/unmanaged_CAPI_clusterclass_test.go index e7f05f18fe..f712dd978d 100644 --- a/test/e2e/suites/unmanaged/unmanaged_CAPI_clusterclass_test.go +++ b/test/e2e/suites/unmanaged/unmanaged_CAPI_clusterclass_test.go @@ -117,7 +117,7 @@ var _ = ginkgo.Context("[unmanaged] [Cluster API Framework] [ClusterClass]", fun // ControlPlaneTemplate of the ClusterClass after the initial Cluster creation. // The test verifies that these fields are rolled out to the ControlPlane. ModifyControlPlaneFields: map[string]interface{}{ - "spec.machineTemplate.nodeDrainTimeout": "10s", + "spec.machineTemplate.spec.deletion.nodeDrainTimeoutSeconds": int64(10), }, // ModifyMachineDeploymentBootstrapConfigTemplateFields are the fields which will be set on the // BootstrapConfigTemplate of all MachineDeploymentClasses of the ClusterClass after the initial Cluster creation. diff --git a/test/e2e/suites/unmanaged/unmanaged_classic_elb_upgrade_test.go b/test/e2e/suites/unmanaged/unmanaged_classic_elb_upgrade_test.go index 1ef8cf8950..5ac5ba5e96 100644 --- a/test/e2e/suites/unmanaged/unmanaged_classic_elb_upgrade_test.go +++ b/test/e2e/suites/unmanaged/unmanaged_classic_elb_upgrade_test.go @@ -40,7 +40,7 @@ import ( infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/test/e2e/shared" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/cluster-api/test/framework" "sigs.k8s.io/cluster-api/test/framework/bootstrap" "sigs.k8s.io/cluster-api/test/framework/clusterctl" diff --git a/test/e2e/suites/unmanaged/unmanaged_functional_test.go b/test/e2e/suites/unmanaged/unmanaged_functional_test.go index f4d6d42e94..e873a7ff35 100644 --- a/test/e2e/suites/unmanaged/unmanaged_functional_test.go +++ b/test/e2e/suites/unmanaged/unmanaged_functional_test.go @@ -41,11 +41,12 @@ import ( infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/exp/instancestate" "sigs.k8s.io/cluster-api-provider-aws/v2/test/e2e/shared" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/cluster-api/test/framework" "sigs.k8s.io/cluster-api/test/framework/clusterctl" "sigs.k8s.io/cluster-api/util" - "sigs.k8s.io/cluster-api/util/conditions" + v1beta1conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" ) const TestSvc = "test-svc-" @@ -255,7 +256,7 @@ var _ = ginkgo.Context("[unmanaged] [functional]", func() { }) Expect(len(workerMachines)).To(Equal(1)) - assertInstanceMetadataOptions(*workerMachines[0].Spec.ProviderID, *machineTempalte.Spec.Template.Spec.InstanceMetadataOptions) + assertInstanceMetadataOptions(workerMachines[0].Spec.ProviderID, *machineTempalte.Spec.Template.Spec.InstanceMetadataOptions) ginkgo.By("PASSED!") }) }) @@ -352,6 +353,24 @@ var _ = ginkgo.Context("[unmanaged] [functional]", func() { }) Expect(len(workerMachines)).To(Equal(1)) Expect(len(controlPlaneMachines)).To(Equal(1)) + + ginkgo.By("Verifying AWSMachineTemplate capacity and nodeInfo are populated for autoscaling from zero") + Eventually(func(g Gomega) { + awsMachineTemplateList := &infrav1.AWSMachineTemplateList{} + g.Expect(e2eCtx.Environment.BootstrapClusterProxy.GetClient().List(ctx, awsMachineTemplateList, client.InNamespace(namespace.Name))).To(Succeed()) +
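// Each template in the namespace should eventually report capacity and node info; + // these status fields are what autoscaling from zero relies on. +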
g.Expect(awsMachineTemplateList.Items).ToNot(BeEmpty()) + + for _, template := range awsMachineTemplateList.Items { + capacity := template.Status.Capacity + _, hasCPU := capacity[corev1.ResourceCPU] + _, hasMemory := capacity[corev1.ResourceMemory] + g.Expect(hasCPU).To(BeTrue(), "Expected AWSMachineTemplate %s to have .status.capacity for CPU set", template.Name) + g.Expect(hasMemory).To(BeTrue(), "Expected AWSMachineTemplate %s to have .status.capacity for memory set", template.Name) + g.Expect(template.Status.NodeInfo).ToNot(BeNil(), "Expected AWSMachineTemplate %s to have .status.nodeInfo set", template.Name) + g.Expect(template.Status.NodeInfo.Architecture).ToNot(BeEmpty(), "Expected AWSMachineTemplate %s to have .status.nodeInfo.architecture set", template.Name) + g.Expect(template.Status.NodeInfo.OperatingSystem).ToNot(BeEmpty(), "Expected AWSMachineTemplate %s to have .status.nodeInfo.operatingSystem set", template.Name) + } + }, e2eCtx.E2EConfig.GetIntervals(specName, "wait-deployment")...).Should(Succeed()) }) }) @@ -522,7 +541,7 @@ var _ = ginkgo.Context("[unmanaged] [functional]", func() { MachineDeployment: *md2[0], }) Expect(len(machines)).Should(BeNumerically(">", 0)) - terminateInstance(*machines[0].Spec.ProviderID) + terminateInstance(machines[0].Spec.ProviderID) ginkgo.By("Waiting for AWSMachine to be labelled as terminated") Eventually(func() bool { @@ -532,7 +551,7 @@ var _ = ginkgo.Context("[unmanaged] [functional]", func() { }, e2eCtx.E2EConfig.GetIntervals("", "wait-machine-status")...).Should(BeTrue(), "Eventually failed waiting for AWSMachine to be labelled as terminated") ginkgo.By("Waiting for machine to reach Failed state") - statusChecks := []framework.MachineStatusCheck{framework.MachinePhaseCheck(string(clusterv1.MachinePhaseFailed))} + statusChecks := []framework.MachineStatusCheck{framework.MachinePhaseCheck(string(clusterv1.MachinePhaseFailed))} //nolint:staticcheck machineStatusInput := framework.WaitForMachineStatusCheckInput{ Getter: e2eCtx.Environment.BootstrapClusterProxy.GetClient(), Machine: &machines[0], @@ -610,7 +629,7 @@ var _ = ginkgo.Context("[unmanaged] [functional]", func() { Namespace: namespace.Name, }) Expect(len(workerMachines)).To(Equal(1)) - assertSpotInstanceType(*workerMachines[0].Spec.ProviderID) + assertSpotInstanceType(workerMachines[0].Spec.ProviderID) Expect(len(controlPlaneMachines)).To(Equal(1)) }) }) @@ -942,7 +961,7 @@ var _ = ginkgo.Context("[unmanaged] [functional]", func() { machineUserData, userDataFormat, err := getRawBootstrapDataWithFormat(e2eCtx.Environment.BootstrapClusterProxy.GetClient(), m) Expect(err).NotTo(HaveOccurred()) Expect(userDataFormat).To(Equal("ignition")) - assertUnencryptedUserDataIgnition(*m.Spec.ProviderID, string(machineUserData)) + assertUnencryptedUserDataIgnition(m.Spec.ProviderID, string(machineUserData)) ginkgo.By("Validating the s3 endpoint was created") vpc, err := shared.GetVPCByName(e2eCtx, clusterName+"-vpc") @@ -967,8 +986,8 @@ var _ = ginkgo.Context("[unmanaged] [functional]", func() { return true } Expect(err).To(BeNil()) - return conditions.IsFalse(awsCluster, infrav1.VpcEndpointsReadyCondition) && - conditions.GetReason(awsCluster, infrav1.VpcEndpointsReadyCondition) == clusterv1.DeletedReason + return v1beta1conditions.IsFalse(awsCluster, infrav1.VpcEndpointsReadyCondition) && + v1beta1conditions.GetReason(awsCluster, infrav1.VpcEndpointsReadyCondition) == clusterv1beta1.DeletedReason }, e2eCtx.E2EConfig.GetIntervals("", "wait-delete-cluster")...).Should(BeTrue(), "Eventually 
failed waiting for AWSCluster to show VPC endpoint as deleted in conditions") }) diff --git a/test/helpers/envtest.go b/test/helpers/envtest.go index 43f0618b0c..60be9b5a45 100644 --- a/test/helpers/envtest.go +++ b/test/helpers/envtest.go @@ -51,7 +51,7 @@ import ( metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server" "sigs.k8s.io/controller-runtime/pkg/webhook" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" logf "sigs.k8s.io/cluster-api/cmd/clusterctl/log" utilyaml "sigs.k8s.io/cluster-api/util/yaml" ) @@ -271,7 +271,8 @@ func (t *TestEnvironment) WaitForWebhooks() { timeout := 1 * time.Second for { time.Sleep(1 * time.Second) - conn, err := net.DialTimeout("tcp", net.JoinHostPort("127.0.0.1", strconv.Itoa(port)), timeout) + dialer := &net.Dialer{Timeout: timeout} + conn, err := dialer.DialContext(context.Background(), "tcp", net.JoinHostPort("127.0.0.1", strconv.Itoa(port))) if err != nil { klog.V(2).Infof("Webhook port is not ready, will retry in %v: %s", timeout, err) continue diff --git a/test/helpers/kubernetesversions/template.go b/test/helpers/kubernetesversions/template.go index aab1915763..961e4d7950 100644 --- a/test/helpers/kubernetesversions/template.go +++ b/test/helpers/kubernetesversions/template.go @@ -22,6 +22,7 @@ package kubernetesversions import ( "bytes" + "context" _ "embed" "errors" "fmt" @@ -128,7 +129,7 @@ func GenerateCIArtifactsInjectedTemplateForDebian(input GenerateCIArtifactsInjec if err := os.WriteFile(path.Join(overlayDir, "platform-kustomization.yaml"), input.PlatformKustomization, 0o600); err != nil { return "", err } - cmd := exec.Command("kustomize", "build", overlayDir) //nolint:gosec // We don't care about command injection here. + cmd := exec.CommandContext(context.TODO(), "kustomize", "build", overlayDir) //nolint:gosec // We don't care about command injection here. data, err := cmd.CombinedOutput() if err != nil { return "", err diff --git a/test/mocks/aws_ec2api_mock.go b/test/mocks/aws_ec2api_mock.go index 835e1f1660..2e666f3650 100644 --- a/test/mocks/aws_ec2api_mock.go +++ b/test/mocks/aws_ec2api_mock.go @@ -71,6 +71,26 @@ func (mr *MockEC2APIMockRecorder) AllocateAddress(arg0, arg1 interface{}, arg2 . return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AllocateAddress", reflect.TypeOf((*MockEC2API)(nil).AllocateAddress), varargs...) } +// AllocateHosts mocks base method. +func (m *MockEC2API) AllocateHosts(arg0 context.Context, arg1 *ec2.AllocateHostsInput, arg2 ...func(*ec2.Options)) (*ec2.AllocateHostsOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "AllocateHosts", varargs...) + ret0, _ := ret[0].(*ec2.AllocateHostsOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// AllocateHosts indicates an expected call of AllocateHosts. +func (mr *MockEC2APIMockRecorder) AllocateHosts(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AllocateHosts", reflect.TypeOf((*MockEC2API)(nil).AllocateHosts), varargs...) +} + // AssociateAddress mocks base method. 
func (m *MockEC2API) AssociateAddress(arg0 context.Context, arg1 *ec2.AssociateAddressInput, arg2 ...func(*ec2.Options)) (*ec2.AssociateAddressOutput, error) { m.ctrl.T.Helper() @@ -771,6 +791,26 @@ func (mr *MockEC2APIMockRecorder) DescribeEgressOnlyInternetGateways(arg0, arg1 return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DescribeEgressOnlyInternetGateways", reflect.TypeOf((*MockEC2API)(nil).DescribeEgressOnlyInternetGateways), varargs...) } +// DescribeHosts mocks base method. +func (m *MockEC2API) DescribeHosts(arg0 context.Context, arg1 *ec2.DescribeHostsInput, arg2 ...func(*ec2.Options)) (*ec2.DescribeHostsOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "DescribeHosts", varargs...) + ret0, _ := ret[0].(*ec2.DescribeHostsOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DescribeHosts indicates an expected call of DescribeHosts. +func (mr *MockEC2APIMockRecorder) DescribeHosts(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DescribeHosts", reflect.TypeOf((*MockEC2API)(nil).DescribeHosts), varargs...) +} + // DescribeImages mocks base method. func (m *MockEC2API) DescribeImages(arg0 context.Context, arg1 *ec2.DescribeImagesInput, arg2 ...func(*ec2.Options)) (*ec2.DescribeImagesOutput, error) { m.ctrl.T.Helper() @@ -1291,6 +1331,26 @@ func (mr *MockEC2APIMockRecorder) ReleaseAddress(arg0, arg1 interface{}, arg2 .. return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReleaseAddress", reflect.TypeOf((*MockEC2API)(nil).ReleaseAddress), varargs...) } +// ReleaseHosts mocks base method. +func (m *MockEC2API) ReleaseHosts(arg0 context.Context, arg1 *ec2.ReleaseHostsInput, arg2 ...func(*ec2.Options)) (*ec2.ReleaseHostsOutput, error) { + m.ctrl.T.Helper() + varargs := []interface{}{arg0, arg1} + for _, a := range arg2 { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "ReleaseHosts", varargs...) + ret0, _ := ret[0].(*ec2.ReleaseHostsOutput) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ReleaseHosts indicates an expected call of ReleaseHosts. +func (mr *MockEC2APIMockRecorder) ReleaseHosts(arg0, arg1 interface{}, arg2 ...interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + varargs := append([]interface{}{arg0, arg1}, arg2...) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReleaseHosts", reflect.TypeOf((*MockEC2API)(nil).ReleaseHosts), varargs...) +} + // ReplaceRoute mocks base method. func (m *MockEC2API) ReplaceRoute(arg0 context.Context, arg1 *ec2.ReplaceRouteInput, arg2 ...func(*ec2.Options)) (*ec2.ReplaceRouteOutput, error) { m.ctrl.T.Helper() diff --git a/test/mocks/capa_clusterscoper_mock.go b/test/mocks/capa_clusterscoper_mock.go index e3664a61e0..95d85dbab1 100644 --- a/test/mocks/capa_clusterscoper_mock.go +++ b/test/mocks/capa_clusterscoper_mock.go @@ -33,7 +33,7 @@ import ( cloud "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud" throttle "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/throttle" logger "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger" - v1beta1 "sigs.k8s.io/cluster-api/api/v1beta1" + v1beta20 "sigs.k8s.io/cluster-api/api/core/v1beta2" client "sigs.k8s.io/controller-runtime/pkg/client" ) @@ -103,10 +103,10 @@ func (mr *MockClusterScoperMockRecorder) Close() *gomock.Call { } // ClusterObj mocks base method. 
-func (m *MockClusterScoper) ClusterObj() cloud.ClusterObject { +func (m *MockClusterScoper) ClusterObj() *v1beta20.Cluster { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "ClusterObj") - ret0, _ := ret[0].(cloud.ClusterObject) + ret0, _ := ret[0].(*v1beta20.Cluster) return ret0 } @@ -378,7 +378,7 @@ func (mr *MockClusterScoperMockRecorder) Session() *gomock.Call { } // SetFailureDomain mocks base method. -func (m *MockClusterScoper) SetFailureDomain(arg0 string, arg1 v1beta1.FailureDomainSpec) { +func (m *MockClusterScoper) SetFailureDomain(arg0 string, arg1 v1beta20.FailureDomain) { m.ctrl.T.Helper() m.ctrl.Call(m, "SetFailureDomain", arg0, arg1) } diff --git a/test/mocks/ocm_client_mock.go b/test/mocks/ocm_client_mock.go index 38da1767aa..a54a7e77a3 100644 --- a/test/mocks/ocm_client_mock.go +++ b/test/mocks/ocm_client_mock.go @@ -24,7 +24,7 @@ import ( reflect "reflect" gomock "github.com/golang/mock/gomock" - v1 "github.com/openshift-online/ocm-sdk-go/clustersmgmt/v1" + v1 "github.com/openshift-online/ocm-api-model/clientapi/clustersmgmt/v1" aws "github.com/openshift/rosa/pkg/aws" ocm "github.com/openshift/rosa/pkg/ocm" ) @@ -305,6 +305,21 @@ func (mr *MockOCMClientMockRecorder) GetNodePools(arg0 interface{}) *gomock.Call return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetNodePools", reflect.TypeOf((*MockOCMClient)(nil).GetNodePools), arg0) } +// GetPolicies mocks base method. +func (m *MockOCMClient) GetPolicies(arg0 string) (map[string]*v1.AWSSTSPolicy, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetPolicies", arg0) + ret0, _ := ret[0].(map[string]*v1.AWSSTSPolicy) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetPolicies indicates an expected call of GetPolicies. +func (mr *MockOCMClientMockRecorder) GetPolicies(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPolicies", reflect.TypeOf((*MockOCMClient)(nil).GetPolicies), arg0) +} + // GetUser mocks base method. func (m *MockOCMClient) GetUser(arg0, arg1, arg2 string) (*v1.User, error) { m.ctrl.T.Helper() diff --git a/util/conditions/helper.go b/util/conditions/helper.go index 2acb09093e..ec3c89e96f 100644 --- a/util/conditions/helper.go +++ b/util/conditions/helper.go @@ -18,16 +18,18 @@ limitations under the License. package conditions import ( - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - "sigs.k8s.io/cluster-api/util/conditions" + "k8s.io/utils/ptr" + + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" ) // ErrorConditionAfterInit returns severity error, if the control plane is initialized; otherwise, returns severity warning. // Failures after control plane is initialized is likely to be non-transient, // hence conditions severities should be set to Error. 
-func ErrorConditionAfterInit(getter conditions.Getter) clusterv1.ConditionSeverity { - if conditions.IsTrue(getter, clusterv1.ControlPlaneInitializedCondition) { - return clusterv1.ConditionSeverityError +func ErrorConditionAfterInit(cluster *clusterv1.Cluster) clusterv1beta1.ConditionSeverity { + if ptr.Deref(cluster.Status.Initialization.ControlPlaneInitialized, false) { + return clusterv1beta1.ConditionSeverityError } - return clusterv1.ConditionSeverityWarning + return clusterv1beta1.ConditionSeverityWarning } diff --git a/util/paused/paused.go b/util/paused/paused.go index 7750ded6d6..66193b3eea 100644 --- a/util/paused/paused.go +++ b/util/paused/paused.go @@ -28,40 +28,42 @@ import ( corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/utils/ptr" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/apiutil" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/cluster-api/util/annotations" - "sigs.k8s.io/cluster-api/util/conditions" - "sigs.k8s.io/cluster-api/util/patch" + v1beta1conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" + v1beta1patch "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/patch" ) // ConditionSetter combines the client.Object and Setter interface. type ConditionSetter interface { - conditions.Setter + v1beta1conditions.Setter client.Object } // EnsurePausedCondition sets the paused condition on the object and returns if it should be considered as paused. func EnsurePausedCondition(ctx context.Context, c client.Client, cluster *clusterv1.Cluster, obj ConditionSetter) (isPaused bool, conditionChanged bool, err error) { - oldCondition := conditions.Get(obj, clusterv1.PausedV1Beta2Condition) - newCondition := pausedCondition(c.Scheme(), cluster, obj, clusterv1.PausedV1Beta2Condition) + oldCondition := v1beta1conditions.Get(obj, clusterv1beta1.PausedV1Beta2Condition) + newCondition := pausedCondition(c.Scheme(), cluster, obj, string(clusterv1beta1.PausedV1Beta2Condition)) isPaused = newCondition.Status == corev1.ConditionTrue log := ctrl.LoggerFrom(ctx) // Return early if the paused condition did not change. - if oldCondition != nil && conditions.HasSameState(oldCondition, &newCondition) { + if oldCondition != nil && v1beta1conditions.HasSameState(oldCondition, &newCondition) { if isPaused { log.V(6).Info("Reconciliation is paused for this object", "reason", newCondition.Message) } return isPaused, false, nil } - patchHelper, err := patch.NewHelper(obj, c) + patchHelper, err := v1beta1patch.NewHelper(obj, c) if err != nil { return isPaused, false, err } @@ -72,10 +74,10 @@ func EnsurePausedCondition(ctx context.Context, c client.Client, cluster *cluste log.V(4).Info("Unpausing reconciliation for this object") } - conditions.Set(obj, &newCondition) + v1beta1conditions.Set(obj, &newCondition) - if err := patchHelper.Patch(ctx, obj, patch.WithOwnedV1Beta2Conditions{Conditions: []string{ - clusterv1.PausedV1Beta2Condition, + if err := patchHelper.Patch(ctx, obj, v1beta1patch.WithOwnedConditions{Conditions: []clusterv1beta1.ConditionType{ + clusterv1beta1.PausedV1Beta2Condition, }}); err != nil { return isPaused, false, err } @@ -84,10 +86,10 @@ func EnsurePausedCondition(ctx context.Context, c client.Client, cluster *cluste } // pausedCondition sets the paused condition on the object and returns if it should be considered as paused. 
-func pausedCondition(scheme *runtime.Scheme, cluster *clusterv1.Cluster, obj ConditionSetter, targetConditionType string) clusterv1.Condition { - if (cluster != nil && cluster.Spec.Paused) || annotations.HasPaused(obj) { +func pausedCondition(scheme *runtime.Scheme, cluster *clusterv1.Cluster, obj ConditionSetter, targetConditionType string) clusterv1beta1.Condition { + if (cluster != nil && ptr.Deref(cluster.Spec.Paused, false)) || annotations.HasPaused(obj) { var messages []string - if cluster != nil && cluster.Spec.Paused { + if cluster != nil && ptr.Deref(cluster.Spec.Paused, false) { messages = append(messages, "Cluster spec.paused is set to true") } if annotations.HasPaused(obj) { @@ -98,17 +100,17 @@ func pausedCondition(scheme *runtime.Scheme, cluster *clusterv1.Cluster, obj Con messages = append(messages, fmt.Sprintf("%s has the cluster.x-k8s.io/paused annotation", kind)) } - return clusterv1.Condition{ - Type: clusterv1.ConditionType(targetConditionType), + return clusterv1beta1.Condition{ + Type: clusterv1beta1.ConditionType(targetConditionType), Status: corev1.ConditionTrue, - Reason: clusterv1.PausedV1Beta2Reason, + Reason: clusterv1beta1.PausedV1Beta2Reason, Message: strings.Join(messages, ", "), } } - return clusterv1.Condition{ - Type: clusterv1.ConditionType(targetConditionType), + return clusterv1beta1.Condition{ + Type: clusterv1beta1.ConditionType(targetConditionType), Status: corev1.ConditionFalse, - Reason: clusterv1.NotPausedV1Beta2Reason, + Reason: clusterv1beta1.NotPausedV1Beta2Reason, } } diff --git a/util/paused/paused_test.go b/util/paused/paused_test.go index 6165263462..72e9940dca 100644 --- a/util/paused/paused_test.go +++ b/util/paused/paused_test.go @@ -24,11 +24,12 @@ import ( . "github.com/onsi/gomega" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - "sigs.k8s.io/cluster-api/util/test/builder" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" + "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/test/builder" ) func TestEnsurePausedCondition(t *testing.T) { @@ -48,10 +49,10 @@ func TestEnsurePausedCondition(t *testing.T) { // Cluster Case 2: paused pausedCluster := normalCluster.DeepCopy() - pausedCluster.Spec.Paused = true + pausedCluster.Spec.Paused = ptr.To(true) // Object case 1: unpaused - obj := &builder.Phase1Obj{ObjectMeta: metav1.ObjectMeta{ + obj := &builder.Phase2Obj{ObjectMeta: metav1.ObjectMeta{ Name: "some-object", Namespace: "default", }} @@ -96,7 +97,7 @@ func TestEnsurePausedCondition(t *testing.T) { g := NewWithT(t) ctx := context.Background() - c := fake.NewClientBuilder().WithScheme(scheme).WithStatusSubresource(&clusterv1.Cluster{}, &builder.Phase1Obj{}). + c := fake.NewClientBuilder().WithScheme(scheme).WithStatusSubresource(&clusterv1.Cluster{}, &builder.Phase2Obj{}). WithObjects(tt.object, tt.cluster).Build() g.Expect(c.Get(ctx, client.ObjectKeyFromObject(tt.object), tt.object)).To(Succeed()) diff --git a/versions.mk b/versions.mk index 967152ce15..9f3c04ac92 100644 --- a/versions.mk +++ b/versions.mk @@ -15,9 +15,9 @@ MDBOOK_VERSION := v0.4.5 PLANTUML_VERSION := 1.2020.16 CERT_MANAGER_VERSION := v1.17.2 -CAPI_VERSION := v1.10.2 +CAPI_VERSION := v1.11.1 KPROMO_VERSION := v4.0.5 YQ_VERSION := v4.25.2 -GOLANGCI_LINT_VERSION := v2.1.0 +GOLANGCI_LINT_VERSION := v2.7.0 RELEASE_NOTES_VERSION := v0.16.5 GORELEASER_VERSION := v1.24.0