diff --git a/Makefile b/Makefile index 1112485..98fca88 100644 --- a/Makefile +++ b/Makefile @@ -71,6 +71,7 @@ E2E_ARTIFACTS ?= $(ROOT_DIR)/_artifacts E2E_CONF_FILE ?= $(ROOT_DIR)/test/e2e/config/scaleway.yaml E2E_CONF_FILE_ENVSUBST := $(ROOT_DIR)/test/e2e/config/scaleway-envsubst.yaml E2E_V1BETA1_TEMPLATES := $(ROOT_DIR)/test/e2e/data/infrastructure-scaleway/v1beta1 +E2E_FOCUS ?= "" .PHONY: setup-test-e2e setup-test-e2e: ## Set up a Kind cluster for e2e tests if it does not exist @@ -84,11 +85,12 @@ setup-test-e2e: ## Set up a Kind cluster for e2e tests if it does not exist generate-e2e: kustomize ## Generate templates for e2e $(KUSTOMIZE) build $(E2E_V1BETA1_TEMPLATES)/cluster-template --load-restrictor LoadRestrictionsNone > $(E2E_V1BETA1_TEMPLATES)/cluster-template.yaml $(KUSTOMIZE) build $(E2E_V1BETA1_TEMPLATES)/cluster-template-private-network --load-restrictor LoadRestrictionsNone > $(E2E_V1BETA1_TEMPLATES)/cluster-template-private-network.yaml + $(KUSTOMIZE) build $(E2E_V1BETA1_TEMPLATES)/cluster-template-managed --load-restrictor LoadRestrictionsNone > $(E2E_V1BETA1_TEMPLATES)/cluster-template-managed.yaml .PHONY: test-e2e test-e2e: setup-test-e2e generate-e2e docker-build envsubst ginkgo build-installer fmt vet ## Run the e2e tests. Expected an isolated environment using Kind. MANAGER_IMAGE=$(IMG) $(ENVSUBST) < $(E2E_CONF_FILE) > $(E2E_CONF_FILE_ENVSUBST) - KIND_CLUSTER=$(KIND_CLUSTER) KUBECONFIG=$(KIND_KUBECONFIG) $(GINKGO) run -v --nodes 2 ./test/e2e/ -- \ + KIND_CLUSTER=$(KIND_CLUSTER) KUBECONFIG=$(KIND_KUBECONFIG) $(GINKGO) run -v --nodes=2 --focus=$(E2E_FOCUS) ./test/e2e/ -- \ -e2e.config $(E2E_CONF_FILE_ENVSUBST) \ -e2e.use-existing-cluster \ -e2e.artifacts-folder=$(E2E_ARTIFACTS) diff --git a/PROJECT b/PROJECT index ed18943..5a32448 100644 --- a/PROJECT +++ b/PROJECT @@ -42,4 +42,31 @@ resources: kind: ScalewayMachineTemplate path: github.com/scaleway/cluster-api-provider-scaleway/api/v1alpha1 version: v1alpha1 +- api: + crdVersion: v1 + namespaced: true + controller: true + domain: cluster.x-k8s.io + group: infrastructure + kind: ScalewayManagedCluster + path: github.com/scaleway/cluster-api-provider-scaleway/api/v1alpha1 + version: v1alpha1 +- api: + crdVersion: v1 + namespaced: true + controller: true + domain: cluster.x-k8s.io + group: infrastructure + kind: ScalewayManagedControlPlane + path: github.com/scaleway/cluster-api-provider-scaleway/api/v1alpha1 + version: v1alpha1 +- api: + crdVersion: v1 + namespaced: true + controller: true + domain: cluster.x-k8s.io + group: infrastructure + kind: ScalewayManagedMachinePool + path: github.com/scaleway/cluster-api-provider-scaleway/api/v1alpha1 + version: v1alpha1 version: "3" diff --git a/api/v1alpha1/scalewaycluster_types.go b/api/v1alpha1/scalewaycluster_types.go index cce5709..6a530ad 100644 --- a/api/v1alpha1/scalewaycluster_types.go +++ b/api/v1alpha1/scalewaycluster_types.go @@ -35,7 +35,7 @@ type ScalewayClusterSpec struct { Network *NetworkSpec `json:"network,omitempty"` // ScalewaySecretName is the name of the secret that contains the Scaleway client parameters. - // The following keys are required: SCW_ACCESS_KEY, SCW_SECRET_KEY, SCW_DEFAULT_PROJECT_ID. + // The following keys are required: SCW_ACCESS_KEY, SCW_SECRET_KEY. // The following key is optional: SCW_API_URL. 
ScalewaySecretName string `json:"scalewaySecretName"` @@ -142,12 +142,6 @@ type ControlPlaneLoadBalancerSpec struct { Private *bool `json:"private,omitempty"` } -// CIDR is an IP address range in CIDR notation (for example, "10.0.0.0/8" or "fd00::/8"). -// +kubebuilder:validation:XValidation:rule="isCIDR(self)",message="value must be a valid CIDR network address" -// +kubebuilder:validation:MaxLength:=43 -// +kubebuilder:validation:MinLength:=1 -type CIDR string - type ControlPlaneDNSSpec struct { // Domain is the DNS Zone that this record should live in. It must be pre-existing in your Scaleway account. // The format must be a string that conforms to the definition of a subdomain in DNS (RFC 1123). @@ -173,44 +167,13 @@ type ControlPlanePrivateDNSSpec struct { // +kubebuilder:validation:XValidation:rule="has(self.id) && !has(self.subnet) || !has(self.id)",message="subnet cannot be set when id is set" // +kubebuilder:validation:XValidation:rule="has(self.id) && !has(self.vpcID) || !has(self.id)",message="vpcID cannot be set when id is set" type PrivateNetworkSpec struct { + PrivateNetworkParams `json:",inline"` + // Set to true to automatically attach machines to a Private Network. // The Private Network is automatically created if no existing Private // Network ID is provided. // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="Value is immutable" Enabled bool `json:"enabled"` - - // Set a Private Network ID to reuse an existing Private Network. - // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="Value is immutable" - // +optional - ID *string `json:"id,omitempty"` - - // Set the VPC ID where the new Private Network will be created. - // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="Value is immutable" - // +optional - VPCID *string `json:"vpcID,omitempty"` - - // Optional subnet for the Private Network. Only used on newly created Private Networks. - // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="Value is immutable" - // +optional - Subnet *string `json:"subnet,omitempty"` -} - -// PublicGatewaySpec defines Public Gateway settings for the cluster. -type PublicGatewaySpec struct { - // Public Gateway commercial offer type. - // +kubebuilder:default="VPC-GW-S" - // +optional - Type *string `json:"type,omitempty"` - - // IP to use when creating a Public Gateway. - // +kubebuilder:validation:Format=ipv4 - // +optional - IP *string `json:"ip,omitempty"` - - // Zone where to create the Public Gateway. Must be in the same region as the - // cluster. Defaults to the first zone of the region. - // +optional - Zone *string `json:"zone,omitempty"` } // ScalewayClusterStatus defines the observed state of ScalewayCluster. 
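The three files added below define the new managed-cluster API types: ScalewayManagedCluster, ScalewayManagedControlPlane and ScalewayManagedMachinePool. As a rough sketch of how these resources might be instantiated together once the CRDs are installed (all names, IDs and values below are illustrative assumptions, not taken from this change):

apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1
kind: ScalewayManagedCluster
metadata:
  name: my-managed-cluster                           # hypothetical name
spec:
  region: fr-par
  projectID: 11111111-1111-1111-1111-111111111111    # placeholder project ID
  scalewaySecretName: my-scaleway-secret             # secret with SCW_ACCESS_KEY / SCW_SECRET_KEY
  network:
    privateNetwork:
      subnet: 192.168.0.0/22
---
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1
kind: ScalewayManagedControlPlane
metadata:
  name: my-managed-control-plane
spec:
  type: kapsule
  version: 1.32.0                                    # illustrative Kubernetes version
  cni: cilium
  acl:
    allowedRanges:
      - 0.0.0.0/0
---
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1
kind: ScalewayManagedMachinePool
metadata:
  name: my-managed-machine-pool
spec:
  nodeType: GP1-XS                                   # any node type with sufficient memory
  zone: fr-par-1
  scaling:
    autoscaling: true
    minSize: 1
    maxSize: 3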
diff --git a/api/v1alpha1/scalewaymanagedcluster_types.go b/api/v1alpha1/scalewaymanagedcluster_types.go new file mode 100644 index 0000000..c3d0da6 --- /dev/null +++ b/api/v1alpha1/scalewaymanagedcluster_types.go @@ -0,0 +1,119 @@ +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" +) + +const ManagedClusterFinalizer = "scalewaycluster.infrastructure.cluster.x-k8s.io/smc-protection" + +// ScalewayManagedClusterSpec defines the desired state of ScalewayManagedCluster +// +// +kubebuilder:validation:XValidation:rule="!has(oldSelf.controlPlaneEndpoint) || has(self.controlPlaneEndpoint)", message="controlPlaneEndpoint is required once set" +// +kubebuilder:validation:XValidation:rule="(has(self.network) && has(self.network.privateNetwork)) == (has(oldSelf.network) && has(oldSelf.network.privateNetwork))",message="privateNetwork cannot be added or removed" +type ScalewayManagedClusterSpec struct { + // Region where the managed cluster will be created. + // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="Value is immutable" + // +kubebuilder:validation:MinLength:=2 + Region string `json:"region"` + + // ProjectID in which the managed cluster will be created. + // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="Value is immutable" + // +kubebuilder:validation:MinLength:=2 + ProjectID string `json:"projectID"` + + // ScalewaySecretName is the name of the secret that contains the Scaleway client parameters. + // The following keys are required: SCW_ACCESS_KEY, SCW_SECRET_KEY. + // The following key is optional: SCW_API_URL. + // +kubebuilder:validation:MinLength:=1 + ScalewaySecretName string `json:"scalewaySecretName"` + + // Network defines the network configuration of the managed cluster. + // +optional + Network *ManagedNetworkSpec `json:"network,omitempty"` + + // ControlPlaneEndpoint represents the endpoint used to communicate with the control plane. + // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="Value is immutable" + // +optional + ControlPlaneEndpoint clusterv1.APIEndpoint `json:"controlPlaneEndpoint,omitempty,omitzero"` +} + +// ManagedNetworkSpec defines the network configuration of a managed cluster. +type ManagedNetworkSpec struct { + // PrivateNetwork allows attaching machines of the cluster to a Private Network. + // +kubebuilder:validation:XValidation:rule="has(self.vpcID) == has(oldSelf.vpcID)",message="vpcID cannot be added or removed" + // +kubebuilder:validation:XValidation:rule="has(self.id) == has(oldSelf.id)",message="id cannot be added or removed" + // +kubebuilder:validation:XValidation:rule="has(self.subnet) == has(oldSelf.subnet)",message="subnet cannot be added or removed" + // +kubebuilder:validation:XValidation:rule="has(self.id) && !has(self.subnet) || !has(self.id)",message="subnet cannot be set when id is set" + // +kubebuilder:validation:XValidation:rule="has(self.id) && !has(self.vpcID) || !has(self.id)",message="vpcID cannot be set when id is set" + // +optional + PrivateNetwork *PrivateNetworkParams `json:"privateNetwork,omitempty"` + + // PublicGateways allows to create Public Gateways that will be attached to the + // Private Network of the cluster. + // +kubebuilder:validation:MaxItems=6 + // +optional + PublicGateways []PublicGatewaySpec `json:"publicGateways,omitempty"` +} + +// ScalewayManagedClusterStatus defines the observed state of ScalewayManagedCluster. 
+type ScalewayManagedClusterStatus struct { + // Ready denotes that the Scaleway managed cluster infrastructure is fully provisioned. + // NOTE: this field is part of the Cluster API contract and it is used to orchestrate provisioning. + // The value of this field is never updated after provisioning is completed. + // +optional + Ready bool `json:"ready"` + + // Network contains information about currently provisioned network resources. + // +optional + Network *ManagedNetworkStatus `json:"network,omitempty"` +} + +// ManagedNetworkStatus contains information about currently provisioned network resources. +type ManagedNetworkStatus struct { + // PrivateNetworkID is the ID of the Private Network that is attached to the cluster. + // +optional + PrivateNetworkID *string `json:"privateNetworkID,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:resource:path=scalewaymanagedclusters,scope=Namespaced,categories=cluster-api,shortName=smc +// +kubebuilder:subresource:status +// +kubebuilder:storageversion +// +kubebuilder:printcolumn:name="Cluster",type="string",JSONPath=".metadata.labels.cluster\\.x-k8s\\.io/cluster-name",description="Cluster to which this ScalewayManagedCluster belongs" +// +kubebuilder:printcolumn:name="Ready",type="boolean",JSONPath=".status.ready",description="Ready is true when the managed cluster is fully provisioned" +// +kubebuilder:printcolumn:name="Region",type="string",JSONPath=".spec.region",description="Region of the managed cluster" +// +kubebuilder:printcolumn:name="Host",type="string",JSONPath=".spec.controlPlaneEndpoint.host",description="Host of the control plane" +// +kubebuilder:printcolumn:name="Port",type="integer",JSONPath=".spec.controlPlaneEndpoint.port",description="Port of the control plane" + +// ScalewayManagedCluster is the Schema for the scalewaymanagedclusters API +// +kubebuilder:validation:XValidation:rule="self.metadata.name.size() <= 63",message="name must be between 1 and 63 characters" +// +kubebuilder:validation:XValidation:rule="self.metadata.name.matches('^[a-z0-9]([-a-z0-9]*[a-z0-9])?$')",message="name must be a valid DNS label" +type ScalewayManagedCluster struct { + metav1.TypeMeta `json:",inline"` + + // metadata is a standard object metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty,omitzero"` + + // spec defines the desired state of ScalewayManagedCluster + // +required + Spec ScalewayManagedClusterSpec `json:"spec"` + + // status defines the observed state of ScalewayManagedCluster + // +optional + Status ScalewayManagedClusterStatus `json:"status,omitempty,omitzero"` +} + +// +kubebuilder:object:root=true + +// ScalewayManagedClusterList contains a list of ScalewayManagedCluster +type ScalewayManagedClusterList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []ScalewayManagedCluster `json:"items"` +} + +func init() { + SchemeBuilder.Register(&ScalewayManagedCluster{}, &ScalewayManagedClusterList{}) +} diff --git a/api/v1alpha1/scalewaymanagedcontrolplane_types.go b/api/v1alpha1/scalewaymanagedcontrolplane_types.go new file mode 100644 index 0000000..34ae4d2 --- /dev/null +++ b/api/v1alpha1/scalewaymanagedcontrolplane_types.go @@ -0,0 +1,284 @@ +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" +) + +const ManagedControlPlaneFinalizer = "scalewaycluster.infrastructure.cluster.x-k8s.io/smcp-protection" + +// ScalewayManagedControlPlaneSpec defines the desired state of 
ScalewayManagedControlPlane +// +// +kubebuilder:validation:XValidation:rule="!has(oldSelf.controlPlaneEndpoint) || has(self.controlPlaneEndpoint)", message="controlPlaneEndpoint is required once set" +// +kubebuilder:validation:XValidation:rule="!has(oldSelf.clusterName) || has(self.clusterName) == has(oldSelf.clusterName)",message="clusterName cannot be removed once set" +// +kubebuilder:validation:XValidation:rule="has(self.cni) == has(oldSelf.cni)",message="cni cannot be added or removed" +// +kubebuilder:validation:XValidation:rule="has(self.enablePrivateEndpoint) == has(oldSelf.enablePrivateEndpoint)",message="enablePrivateEndpoint cannot be added or removed" +type ScalewayManagedControlPlaneSpec struct { + // ClusterName allows you to specify the name of the Scaleway managed cluster. + // If you don't specify a name then a default name will be created + // based on the namespace and name of the managed control plane. + // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="Value is immutable" + // +kubebuilder:validation:MaxLength:=100 + // +optional + ClusterName *string `json:"clusterName,omitempty"` + + // Type of the cluster (e.g. kapsule, multicloud, etc.). + // +kubebuilder:default="kapsule" + Type string `json:"type"` + + // Version defines the desired Kubernetes version. + // +kubebuilder:validation:MinLength:=2 + Version string `json:"version"` + + // CNI plugin running in the cluster. + // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="Value is immutable" + // +kubebuilder:validation:Enum=cilium;calico;kilo;none + // +optional + CNI *string `json:"cni,omitempty"` + + // AdditionalTags that will be added to the default tags. + // +optional + AdditionalTags []string `json:"additionalTags,omitempty"` + + // Autoscaler configuration of the cluster. + // +optional + Autoscaler *AutoscalerSpec `json:"autoscaler,omitempty"` + + // AutoUpgrade configuration of the cluster. + // +optional + AutoUpgrade *AutoUpgradeSpec `json:"autoUpgrade,omitempty"` + + // Feature gates to enable. + // +optional + FeatureGates []string `json:"featureGates,omitempty"` + + // Admission plugins to enable. + // +optional + AdmissionPlugins []string `json:"admissionPlugins,omitempty"` + + // OpenIDConnect defines the OpenID Connect configuration of the Kubernetes API server. + OpenIDConnect *OpenIDConnectSpec `json:"openIDConnect,omitempty"` + + // APIServerCertSANs defines additional Subject Alternative Names for the + // Kubernetes API server certificate. + // +optional + APIServerCertSANs []string `json:"apiServerCertSANs,omitempty"` + + // OnDelete configures the settings to apply when deleting the Scaleway managed cluster. + // +optional + OnDelete *OnDeleteSpec `json:"onDelete,omitempty"` + + // ACLSpec configures the ACLs of the managed cluster. If not set, ACLs + // will be set to 0.0.0.0/0. + // +optional + ACL *ACLSpec `json:"acl,omitempty"` + + // EnablePrivateEndpoint defines whether the apiserver's internal address + // is used as the cluster endpoint. + // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="Value is immutable" + // +optional + EnablePrivateEndpoint *bool `json:"enablePrivateEndpoint,omitempty"` + + // ControlPlaneEndpoint represents the endpoint used to communicate with the control plane. 
+ // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="Value is immutable" + // +optional + ControlPlaneEndpoint clusterv1.APIEndpoint `json:"controlPlaneEndpoint,omitempty"` +} + +// OnDeleteSpec configures the settings to apply when deleting the Scaleway managed cluster. +type OnDeleteSpec struct { + // WithAdditionalResources allows to also automatically delete all volumes + // (including those with volume type "retain"), empty Private Networks and + // Load Balancers whose names start with cluster ID. + // +optional + WithAdditionalResources *bool `json:"withAdditionalResources,omitempty"` +} + +// AutoscalerSpec allows you to set (to an extent) your preferred autoscaler configuration, +// which is an implementation of the cluster-autoscaler (https://github.com/kubernetes/autoscaler/tree/master/cluster-autoscaler/). +type AutoscalerSpec struct { + // Disable the cluster autoscaler. + // +optional + ScaleDownDisabled *bool `json:"scaleDownDisabled,omitempty"` + + // How long after scale up the scale down evaluation resumes. + // +optional + ScaleDownDelayAfterAdd *string `json:"scaleDownDelayAfterAdd,omitempty"` + + // Type of resource estimator to be used in scale up. + // +kubebuilder:validation:Enum=binpacking + // +optional + Estimator *string `json:"estimator,omitempty"` + + // Type of node group expander to be used in scale up. + // +kubebuilder:validation:Enum=random;most_pods;least_waste;priority;price + // +optional + Expander *string `json:"expander,omitempty"` + + // Ignore DaemonSet pods when calculating resource utilization for scaling down. + // +optional + IgnoreDaemonsetsUtilization *bool `json:"ignoreDaemonsetsUtilization,omitempty"` + + // Detect similar node groups and balance the number of nodes between them. + // +optional + BalanceSimilarNodeGroups *bool `json:"balanceSimilarNodeGroups,omitempty"` + + // Pods with priority below cutoff will be expendable. They can be killed without + // any consideration during scale down and they won't cause scale up. + // Pods with null priority (PodPriority disabled) are non expendable. + ExpendablePodsPriorityCutoff *int32 `json:"expendablePodsPriorityCutoff,omitempty"` + + // How long a node should be unneeded before it is eligible to be scaled down. + // +optional + ScaleDownUnneededTime *string `json:"scaleDownUnneededTime,omitempty"` + + // Node utilization level, defined as a sum of requested resources divided + // by capacity, below which a node can be considered for scale down. + // +kubebuilder:validation:Format="float" + // +optional + ScaleDownUtilizationThreshold *string `json:"scaleDownUtilizationThreshold,omitempty"` + + // Maximum number of seconds the cluster autoscaler waits for pod termination + // when trying to scale down a node. + // +optional + MaxGracefulTerminationSec *int32 `json:"maxGracefulTerminationSec,omitempty"` +} + +// AutoUpgradeSpec allows to set a specific 2-hour time window in which the cluster +// can be automatically updated to the latest patch version. +type AutoUpgradeSpec struct { + // Defines whether auto upgrade is enabled for the cluster. + Enabled bool `json:"enabled"` + + // Maintenance window of the cluster auto upgrades. + // +optional + MaintenanceWindow *MaintenanceWindowSpec `json:"maintenanceWindow,omitempty"` +} + +// MaintenanceWindowSpec defines the window of the cluster auto upgrades. +type MaintenanceWindowSpec struct { + // Start time of the two-hour maintenance window. 
+ // +optional + StartHour *int32 `json:"startHour,omitempty"` + + // Day of the week for the maintenance window. + // +kubebuilder:validation:Enum=any;monday;tuesday;wednesday;thursday;friday;saturday;sunday + // +optional + Day *string `json:"day,omitempty"` +} + +// OpenIDConnectSpec defines the OpenID Connect configuration of the Kubernetes API server. +type OpenIDConnectSpec struct { + // URL of the provider which allows the API server to discover public signing keys. + // Only URLs using the https:// scheme are accepted. This is typically the provider's + // discovery URL without a path, for example "https://accounts.google.com" or "https://login.salesforce.com". + IssuerURL string `json:"issuerURL"` + + // A client ID that all tokens must be issued for. + ClientID string `json:"clientID"` + + // JWT claim to use as the user name. The default is "sub", which is expected + // to be the end user's unique identifier. Admins can choose other claims, + // such as email or name, depending on their provider. However, claims other + // than email will be prefixed with the issuer URL to prevent name collision. + // +optional + UsernameClaim *string `json:"usernameClaim,omitempty"` + + // Prefix prepended to username claims to prevent name collision (such as "system:" users). + // For example, the value "oidc:" will create usernames like "oidc:jane.doe". + // If this flag is not provided and "username_claim" is a value other than email, + // the prefix defaults to "( Issuer URL )#" where "( Issuer URL )" is the value of "issuer_url". + // The value "-" can be used to disable all prefixing. + // +optional + UsernamePrefix *string `json:"usernamePrefix,omitempty"` + + // JWT claim to use as the user's group. + // +optional + GroupsClaim []string `json:"groupsClaim,omitempty"` + + // Prefix prepended to group claims to prevent name collision (such as "system:" groups). + // For example, the value "oidc:" will create group names like "oidc:engineering" and "oidc:infra". + // +optional + GroupsPrefix *string `json:"groupsPrefix,omitempty"` + + // Multiple key=value pairs describing a required claim in the ID token. If set, + // the claims are verified to be present in the ID token with a matching value. + // +optional + RequiredClaim []string `json:"requiredClaim,omitempty"` +} + +// ScalewayManagedControlPlaneStatus defines the observed state of ScalewayManagedControlPlane. +type ScalewayManagedControlPlaneStatus struct { + // Ready is true when the provider resource is ready. + // +optional + Ready bool `json:"ready,omitempty"` + + // Initialized is true when the control plane is available for initial contact. + // This may occur before the control plane is fully ready. + // In the ScalewayManagedControlPlane implementation, these are identical. + // +optional + Initialized bool `json:"initialized,omitempty"` + + // ExternalManagedControlPlane is a bool that should be set to true if the + // Node objects do not exist in the cluster. + // +kubebuilder:default=true + // +optional + ExternalManagedControlPlane bool `json:"externalManagedControlPlane,omitempty"` + + // Version represents the version of the Scaleway managed control plane. + // +optional + Version *string `json:"version,omitempty"` +} + +// ACLSpec configures the ACLs of the managed cluster. +type ACLSpec struct { + // AllowedRanges allows to set a list of allowed public IP ranges that can access + // the managed cluster. When empty, all IP ranges are DENIED.
Make sure the nodes + // of your management cluster can still access the cluster by allowing their IPs. + // +kubebuilder:validation:MaxItems=30 + // +listType=set + // +optional + AllowedRanges []CIDR `json:"allowedRanges,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:resource:path=scalewaymanagedcontrolplanes,scope=Namespaced,categories=cluster-api,shortName=smcp +// +kubebuilder:subresource:status +// +kubebuilder:storageversion +// +kubebuilder:printcolumn:name="Cluster",type="string",JSONPath=".metadata.labels.cluster\\.x-k8s\\.io/cluster-name",description="Cluster to which this ScalewayManagedControlPlane belongs" +// +kubebuilder:printcolumn:name="Ready",type="boolean",JSONPath=".status.ready",description="Ready is true when the managed cluster is fully provisioned" +// +kubebuilder:printcolumn:name="Version",type="string",JSONPath=".status.version",description="The Kubernetes version of the Scaleway control plane" +// +kubebuilder:printcolumn:name="Host",type="string",JSONPath=".spec.controlPlaneEndpoint.host",description="Host of the control plane" +// +kubebuilder:printcolumn:name="Port",type="integer",JSONPath=".spec.controlPlaneEndpoint.port",description="Port of the control plane" + +// ScalewayManagedControlPlane is the Schema for the scalewaymanagedcontrolplanes API +// +kubebuilder:validation:XValidation:rule="self.metadata.name.size() <= 63",message="name must be between 1 and 63 characters" +// +kubebuilder:validation:XValidation:rule="self.metadata.name.matches('^[a-z0-9]([-a-z0-9]*[a-z0-9])?$')",message="name must be a valid DNS label" +type ScalewayManagedControlPlane struct { + metav1.TypeMeta `json:",inline"` + + // metadata is a standard object metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty,omitzero"` + + // spec defines the desired state of ScalewayManagedControlPlane + // +required + Spec ScalewayManagedControlPlaneSpec `json:"spec"` + + // status defines the observed state of ScalewayManagedControlPlane + // +optional + Status ScalewayManagedControlPlaneStatus `json:"status,omitempty,omitzero"` +} + +// +kubebuilder:object:root=true + +// ScalewayManagedControlPlaneList contains a list of ScalewayManagedControlPlane +type ScalewayManagedControlPlaneList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []ScalewayManagedControlPlane `json:"items"` +} + +func init() { + SchemeBuilder.Register(&ScalewayManagedControlPlane{}, &ScalewayManagedControlPlaneList{}) +} diff --git a/api/v1alpha1/scalewaymanagedmachinepool_types.go b/api/v1alpha1/scalewaymanagedmachinepool_types.go new file mode 100644 index 0000000..7af8623 --- /dev/null +++ b/api/v1alpha1/scalewaymanagedmachinepool_types.go @@ -0,0 +1,165 @@ +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +const ManagedMachinePoolFinalizer = "scalewaycluster.infrastructure.cluster.x-k8s.io/smmp-protection" + +// ScalewayManagedMachinePoolSpec defines the desired state of ScalewayManagedMachinePool +// +// +kubebuilder:validation:XValidation:rule="has(self.placementGroupID) == has(oldSelf.placementGroupID)",message="placementGroupID cannot be added or removed" +// +kubebuilder:validation:XValidation:rule="has(self.rootVolumeType) == has(oldSelf.rootVolumeType)",message="rootVolumeType cannot be added or removed" +// +kubebuilder:validation:XValidation:rule="has(self.rootVolumeSizeGB) == has(oldSelf.rootVolumeSizeGB)",message="rootVolumeSizeGB cannot be added or removed" +// 
+kubebuilder:validation:XValidation:rule="has(self.publicIPDisabled) == has(oldSelf.publicIPDisabled)",message="publicIPDisabled cannot be added or removed" +// +kubebuilder:validation:XValidation:rule="has(self.securityGroupID) == has(oldSelf.securityGroupID)",message="securityGroupID cannot be added or removed" +type ScalewayManagedMachinePoolSpec struct { + // NodeType is the type of Scaleway Instance wanted for the pool. Nodes with + // insufficient memory are not eligible (DEV1-S, PLAY2-PICO, STARDUST). + // "external" is a special node type used to provision instances from other + // cloud providers in a Kosmos Cluster. + // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="Value is immutable" + // +kubebuilder:validation:MinLength:=2 + NodeType string `json:"nodeType"` + + // Zone in which the pool's nodes will be spawned. + // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="Value is immutable" + // +kubebuilder:validation:MinLength:=2 + Zone string `json:"zone"` + + // PlacementGroupID in which all the nodes of the pool will be created, + // placement groups are limited to 20 instances. + // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="Value is immutable" + // +optional + PlacementGroupID *string `json:"placementGroupID,omitempty"` + + // Scaling configures the scaling of the pool. + // +optional + Scaling *ScalingSpec `json:"scaling,omitempty"` + + // Autohealing defines whether the autohealing feature is enabled for the pool. + // +optional + Autohealing *bool `json:"autohealing,omitempty"` + + // AdditionalTags that will be added to the default tags. + // +optional + AdditionalTags []string `json:"additionalTags,omitempty"` + + // KubeletArgs defines Kubelet arguments to be used by this pool. + // +optional + KubeletArgs map[string]string `json:"kubeletArgs,omitempty"` + + // UpgradePolicy defines the pool's upgrade policy. + // +optional + UpgradePolicy *UpgradePolicySpec `json:"upgradePolicy,omitempty"` + + // RootVolumeType is the system volume disk type. + // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="Value is immutable" + // +kubebuilder:validation:Enum=l_ssd;sbs_5k;sbs_15k + // +optional + RootVolumeType *string `json:"rootVolumeType,omitempty"` + + // RootVolumeSizeGB is the size of the System volume disk size, in GB. + // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="Value is immutable" + // +optional + RootVolumeSizeGB *int64 `json:"rootVolumeSizeGB,omitempty"` + + // PublicIPDisabled defines if the public IP should be removed from Nodes. + // To use this feature, your Cluster must have an attached Private Network + // set up with a Public Gateway. + // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="Value is immutable" + // +optional + PublicIPDisabled *bool `json:"publicIPDisabled,omitempty"` + + // SecurityGroupID in which all the nodes of the pool will be created. If unset, + // the pool will use default Kapsule security group in current zone. + // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="Value is immutable" + // +optional + SecurityGroupID *string `json:"securityGroupID,omitempty"` + + // ProviderIDList are the provider IDs of instances in the + // managed instance group corresponding to the nodegroup represented by this + // machine pool + // +optional + ProviderIDList []string `json:"providerIDList,omitempty"` +} + +// ScalingSpec defines the scaling parameters of the pool. 
+type ScalingSpec struct { + // Autoscaling defines whether the autoscaling feature is enabled for the pool. + // +optional + Autoscaling *bool `json:"autoscaling,omitempty"` + + // MinSize defines the minimum size of the pool. Note that this field is only + // used when autoscaling is enabled on the pool. + // +optional + MinSize *int32 `json:"minSize,omitempty"` + + // MaxSize defines the maximum size of the pool. Note that this field is only + // used when autoscaling is enabled on the pool. + // +optional + MaxSize *int32 `json:"maxSize,omitempty"` +} + +// UpgradePolicySpec defines the pool's upgrade policy. +type UpgradePolicySpec struct { + // MaxUnavailable is the maximum number of available nodes during upgrades. + // +kubebuilder:validation:Minimum=0 + // +optional + MaxUnavailable *int32 `json:"maxUnavailable,omitempty"` + + // MaxSurge is the maximum number of additional nodes that can be provisioned + // during upgrades. + // +kubebuilder:validation:Minimum=0 + // +optional + MaxSurge *int32 `json:"maxSurge,omitempty"` +} + +// ScalewayManagedMachinePoolStatus defines the observed state of ScalewayManagedMachinePool. +type ScalewayManagedMachinePoolStatus struct { + // Ready denotes that the ScalewayManagedMachinePool has joined the cluster + // +kubebuilder:default=false + Ready bool `json:"ready"` + // Replicas is the most recently observed number of replicas. + // +optional + Replicas int32 `json:"replicas"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:resource:path=scalewaymanagedmachinepools,scope=Namespaced,categories=cluster-api,shortName=smmp +// +kubebuilder:subresource:status +// +kubebuilder:storageversion +// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.ready" +// +kubebuilder:printcolumn:name="Replicas",type="string",JSONPath=".status.replicas" + +// ScalewayManagedMachinePool is the Schema for the scalewaymanagedmachinepools API +// +kubebuilder:validation:XValidation:rule="self.metadata.name.size() <= 63",message="name must be between 1 and 63 characters" +// +kubebuilder:validation:XValidation:rule="self.metadata.name.matches('^[a-z0-9]([-a-z0-9]*[a-z0-9])?$')",message="name must be a valid DNS label" +type ScalewayManagedMachinePool struct { + metav1.TypeMeta `json:",inline"` + + // metadata is a standard object metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty,omitzero"` + + // spec defines the desired state of ScalewayManagedMachinePool + // +required + Spec ScalewayManagedMachinePoolSpec `json:"spec"` + + // status defines the observed state of ScalewayManagedMachinePool + // +optional + Status ScalewayManagedMachinePoolStatus `json:"status,omitempty,omitzero"` +} + +// +kubebuilder:object:root=true + +// ScalewayManagedMachinePoolList contains a list of ScalewayManagedMachinePool +type ScalewayManagedMachinePoolList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []ScalewayManagedMachinePool `json:"items"` +} + +func init() { + SchemeBuilder.Register(&ScalewayManagedMachinePool{}, &ScalewayManagedMachinePoolList{}) +} diff --git a/api/v1alpha1/types.go b/api/v1alpha1/types.go new file mode 100644 index 0000000..2c423db --- /dev/null +++ b/api/v1alpha1/types.go @@ -0,0 +1,43 @@ +package v1alpha1 + +// PublicGatewaySpec defines Public Gateway settings for the cluster. +type PublicGatewaySpec struct { + // Public Gateway commercial offer type. 
+ // +kubebuilder:default="VPC-GW-S" + // +optional + Type *string `json:"type,omitempty"` + + // IP to use when creating a Public Gateway. + // +kubebuilder:validation:Format=ipv4 + // +optional + IP *string `json:"ip,omitempty"` + + // Zone where to create the Public Gateway. Must be in the same region as the + // cluster. Defaults to the first zone of the region. + // +optional + Zone *string `json:"zone,omitempty"` +} + +// PrivateNetworkParams allows to set the params of the Private Network. +type PrivateNetworkParams struct { + // Set a Private Network ID to reuse an existing Private Network. + // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="Value is immutable" + // +optional + ID *string `json:"id,omitempty"` + + // Set the VPC ID where the new Private Network will be created. + // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="Value is immutable" + // +optional + VPCID *string `json:"vpcID,omitempty"` + + // Optional subnet for the Private Network. Only used on newly created Private Networks. + // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="Value is immutable" + // +optional + Subnet *string `json:"subnet,omitempty"` +} + +// CIDR is an IP address range in CIDR notation (for example, "10.0.0.0/8" or "fd00::/8"). +// +kubebuilder:validation:XValidation:rule="isCIDR(self)",message="value must be a valid CIDR network address" +// +kubebuilder:validation:MaxLength:=43 +// +kubebuilder:validation:MinLength:=1 +type CIDR string diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go index f243301..ef96802 100644 --- a/api/v1alpha1/zz_generated.deepcopy.go +++ b/api/v1alpha1/zz_generated.deepcopy.go @@ -9,6 +9,111 @@ import ( "sigs.k8s.io/cluster-api/api/v1beta1" ) +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ACLSpec) DeepCopyInto(out *ACLSpec) { + *out = *in + if in.AllowedRanges != nil { + in, out := &in.AllowedRanges, &out.AllowedRanges + *out = make([]CIDR, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ACLSpec. +func (in *ACLSpec) DeepCopy() *ACLSpec { + if in == nil { + return nil + } + out := new(ACLSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AutoUpgradeSpec) DeepCopyInto(out *AutoUpgradeSpec) { + *out = *in + if in.MaintenanceWindow != nil { + in, out := &in.MaintenanceWindow, &out.MaintenanceWindow + *out = new(MaintenanceWindowSpec) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoUpgradeSpec. +func (in *AutoUpgradeSpec) DeepCopy() *AutoUpgradeSpec { + if in == nil { + return nil + } + out := new(AutoUpgradeSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AutoscalerSpec) DeepCopyInto(out *AutoscalerSpec) { + *out = *in + if in.ScaleDownDisabled != nil { + in, out := &in.ScaleDownDisabled, &out.ScaleDownDisabled + *out = new(bool) + **out = **in + } + if in.ScaleDownDelayAfterAdd != nil { + in, out := &in.ScaleDownDelayAfterAdd, &out.ScaleDownDelayAfterAdd + *out = new(string) + **out = **in + } + if in.Estimator != nil { + in, out := &in.Estimator, &out.Estimator + *out = new(string) + **out = **in + } + if in.Expander != nil { + in, out := &in.Expander, &out.Expander + *out = new(string) + **out = **in + } + if in.IgnoreDaemonsetsUtilization != nil { + in, out := &in.IgnoreDaemonsetsUtilization, &out.IgnoreDaemonsetsUtilization + *out = new(bool) + **out = **in + } + if in.BalanceSimilarNodeGroups != nil { + in, out := &in.BalanceSimilarNodeGroups, &out.BalanceSimilarNodeGroups + *out = new(bool) + **out = **in + } + if in.ExpendablePodsPriorityCutoff != nil { + in, out := &in.ExpendablePodsPriorityCutoff, &out.ExpendablePodsPriorityCutoff + *out = new(int32) + **out = **in + } + if in.ScaleDownUnneededTime != nil { + in, out := &in.ScaleDownUnneededTime, &out.ScaleDownUnneededTime + *out = new(string) + **out = **in + } + if in.ScaleDownUtilizationThreshold != nil { + in, out := &in.ScaleDownUtilizationThreshold, &out.ScaleDownUtilizationThreshold + *out = new(string) + **out = **in + } + if in.MaxGracefulTerminationSec != nil { + in, out := &in.MaxGracefulTerminationSec, &out.MaxGracefulTerminationSec + *out = new(int32) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoscalerSpec. +func (in *AutoscalerSpec) DeepCopy() *AutoscalerSpec { + if in == nil { + return nil + } + out := new(AutoscalerSpec) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ControlPlaneDNSSpec) DeepCopyInto(out *ControlPlaneDNSSpec) { *out = *in @@ -130,6 +235,78 @@ func (in *LoadBalancerSpec) DeepCopy() *LoadBalancerSpec { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MaintenanceWindowSpec) DeepCopyInto(out *MaintenanceWindowSpec) { + *out = *in + if in.StartHour != nil { + in, out := &in.StartHour, &out.StartHour + *out = new(int32) + **out = **in + } + if in.Day != nil { + in, out := &in.Day, &out.Day + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MaintenanceWindowSpec. +func (in *MaintenanceWindowSpec) DeepCopy() *MaintenanceWindowSpec { + if in == nil { + return nil + } + out := new(MaintenanceWindowSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ManagedNetworkSpec) DeepCopyInto(out *ManagedNetworkSpec) { + *out = *in + if in.PrivateNetwork != nil { + in, out := &in.PrivateNetwork, &out.PrivateNetwork + *out = new(PrivateNetworkParams) + (*in).DeepCopyInto(*out) + } + if in.PublicGateways != nil { + in, out := &in.PublicGateways, &out.PublicGateways + *out = make([]PublicGatewaySpec, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedNetworkSpec. 
+func (in *ManagedNetworkSpec) DeepCopy() *ManagedNetworkSpec { + if in == nil { + return nil + } + out := new(ManagedNetworkSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ManagedNetworkStatus) DeepCopyInto(out *ManagedNetworkStatus) { + *out = *in + if in.PrivateNetworkID != nil { + in, out := &in.PrivateNetworkID, &out.PrivateNetworkID + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedNetworkStatus. +func (in *ManagedNetworkStatus) DeepCopy() *ManagedNetworkStatus { + if in == nil { + return nil + } + out := new(ManagedNetworkStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *NetworkSpec) DeepCopyInto(out *NetworkSpec) { *out = *in @@ -219,6 +396,66 @@ func (in *NetworkStatus) DeepCopy() *NetworkStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OnDeleteSpec) DeepCopyInto(out *OnDeleteSpec) { + *out = *in + if in.WithAdditionalResources != nil { + in, out := &in.WithAdditionalResources, &out.WithAdditionalResources + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OnDeleteSpec. +func (in *OnDeleteSpec) DeepCopy() *OnDeleteSpec { + if in == nil { + return nil + } + out := new(OnDeleteSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OpenIDConnectSpec) DeepCopyInto(out *OpenIDConnectSpec) { + *out = *in + if in.UsernameClaim != nil { + in, out := &in.UsernameClaim, &out.UsernameClaim + *out = new(string) + **out = **in + } + if in.UsernamePrefix != nil { + in, out := &in.UsernamePrefix, &out.UsernamePrefix + *out = new(string) + **out = **in + } + if in.GroupsClaim != nil { + in, out := &in.GroupsClaim, &out.GroupsClaim + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.GroupsPrefix != nil { + in, out := &in.GroupsPrefix, &out.GroupsPrefix + *out = new(string) + **out = **in + } + if in.RequiredClaim != nil { + in, out := &in.RequiredClaim, &out.RequiredClaim + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenIDConnectSpec. +func (in *OpenIDConnectSpec) DeepCopy() *OpenIDConnectSpec { + if in == nil { + return nil + } + out := new(OpenIDConnectSpec) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *PlacementGroupSpec) DeepCopyInto(out *PlacementGroupSpec) { *out = *in @@ -245,7 +482,7 @@ func (in *PlacementGroupSpec) DeepCopy() *PlacementGroupSpec { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *PrivateNetworkSpec) DeepCopyInto(out *PrivateNetworkSpec) { +func (in *PrivateNetworkParams) DeepCopyInto(out *PrivateNetworkParams) { *out = *in if in.ID != nil { in, out := &in.ID, &out.ID @@ -264,6 +501,22 @@ func (in *PrivateNetworkSpec) DeepCopyInto(out *PrivateNetworkSpec) { } } +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrivateNetworkParams. +func (in *PrivateNetworkParams) DeepCopy() *PrivateNetworkParams { + if in == nil { + return nil + } + out := new(PrivateNetworkParams) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PrivateNetworkSpec) DeepCopyInto(out *PrivateNetworkSpec) { + *out = *in + in.PrivateNetworkParams.DeepCopyInto(&out.PrivateNetworkParams) +} + // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrivateNetworkSpec. func (in *PrivateNetworkSpec) DeepCopy() *PrivateNetworkSpec { if in == nil { @@ -773,6 +1026,437 @@ func (in *ScalewayMachineTemplateSpec) DeepCopy() *ScalewayMachineTemplateSpec { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScalewayManagedCluster) DeepCopyInto(out *ScalewayManagedCluster) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScalewayManagedCluster. +func (in *ScalewayManagedCluster) DeepCopy() *ScalewayManagedCluster { + if in == nil { + return nil + } + out := new(ScalewayManagedCluster) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ScalewayManagedCluster) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScalewayManagedClusterList) DeepCopyInto(out *ScalewayManagedClusterList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ScalewayManagedCluster, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScalewayManagedClusterList. +func (in *ScalewayManagedClusterList) DeepCopy() *ScalewayManagedClusterList { + if in == nil { + return nil + } + out := new(ScalewayManagedClusterList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ScalewayManagedClusterList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ScalewayManagedClusterSpec) DeepCopyInto(out *ScalewayManagedClusterSpec) { + *out = *in + if in.Network != nil { + in, out := &in.Network, &out.Network + *out = new(ManagedNetworkSpec) + (*in).DeepCopyInto(*out) + } + out.ControlPlaneEndpoint = in.ControlPlaneEndpoint +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScalewayManagedClusterSpec. +func (in *ScalewayManagedClusterSpec) DeepCopy() *ScalewayManagedClusterSpec { + if in == nil { + return nil + } + out := new(ScalewayManagedClusterSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScalewayManagedClusterStatus) DeepCopyInto(out *ScalewayManagedClusterStatus) { + *out = *in + if in.Network != nil { + in, out := &in.Network, &out.Network + *out = new(ManagedNetworkStatus) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScalewayManagedClusterStatus. +func (in *ScalewayManagedClusterStatus) DeepCopy() *ScalewayManagedClusterStatus { + if in == nil { + return nil + } + out := new(ScalewayManagedClusterStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScalewayManagedControlPlane) DeepCopyInto(out *ScalewayManagedControlPlane) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScalewayManagedControlPlane. +func (in *ScalewayManagedControlPlane) DeepCopy() *ScalewayManagedControlPlane { + if in == nil { + return nil + } + out := new(ScalewayManagedControlPlane) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ScalewayManagedControlPlane) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScalewayManagedControlPlaneList) DeepCopyInto(out *ScalewayManagedControlPlaneList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ScalewayManagedControlPlane, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScalewayManagedControlPlaneList. +func (in *ScalewayManagedControlPlaneList) DeepCopy() *ScalewayManagedControlPlaneList { + if in == nil { + return nil + } + out := new(ScalewayManagedControlPlaneList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ScalewayManagedControlPlaneList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ScalewayManagedControlPlaneSpec) DeepCopyInto(out *ScalewayManagedControlPlaneSpec) { + *out = *in + if in.ClusterName != nil { + in, out := &in.ClusterName, &out.ClusterName + *out = new(string) + **out = **in + } + if in.CNI != nil { + in, out := &in.CNI, &out.CNI + *out = new(string) + **out = **in + } + if in.AdditionalTags != nil { + in, out := &in.AdditionalTags, &out.AdditionalTags + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Autoscaler != nil { + in, out := &in.Autoscaler, &out.Autoscaler + *out = new(AutoscalerSpec) + (*in).DeepCopyInto(*out) + } + if in.AutoUpgrade != nil { + in, out := &in.AutoUpgrade, &out.AutoUpgrade + *out = new(AutoUpgradeSpec) + (*in).DeepCopyInto(*out) + } + if in.FeatureGates != nil { + in, out := &in.FeatureGates, &out.FeatureGates + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.AdmissionPlugins != nil { + in, out := &in.AdmissionPlugins, &out.AdmissionPlugins + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.OpenIDConnect != nil { + in, out := &in.OpenIDConnect, &out.OpenIDConnect + *out = new(OpenIDConnectSpec) + (*in).DeepCopyInto(*out) + } + if in.APIServerCertSANs != nil { + in, out := &in.APIServerCertSANs, &out.APIServerCertSANs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.OnDelete != nil { + in, out := &in.OnDelete, &out.OnDelete + *out = new(OnDeleteSpec) + (*in).DeepCopyInto(*out) + } + if in.ACL != nil { + in, out := &in.ACL, &out.ACL + *out = new(ACLSpec) + (*in).DeepCopyInto(*out) + } + if in.EnablePrivateEndpoint != nil { + in, out := &in.EnablePrivateEndpoint, &out.EnablePrivateEndpoint + *out = new(bool) + **out = **in + } + out.ControlPlaneEndpoint = in.ControlPlaneEndpoint +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScalewayManagedControlPlaneSpec. +func (in *ScalewayManagedControlPlaneSpec) DeepCopy() *ScalewayManagedControlPlaneSpec { + if in == nil { + return nil + } + out := new(ScalewayManagedControlPlaneSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScalewayManagedControlPlaneStatus) DeepCopyInto(out *ScalewayManagedControlPlaneStatus) { + *out = *in + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScalewayManagedControlPlaneStatus. +func (in *ScalewayManagedControlPlaneStatus) DeepCopy() *ScalewayManagedControlPlaneStatus { + if in == nil { + return nil + } + out := new(ScalewayManagedControlPlaneStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScalewayManagedMachinePool) DeepCopyInto(out *ScalewayManagedMachinePool) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + out.Status = in.Status +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScalewayManagedMachinePool. +func (in *ScalewayManagedMachinePool) DeepCopy() *ScalewayManagedMachinePool { + if in == nil { + return nil + } + out := new(ScalewayManagedMachinePool) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *ScalewayManagedMachinePool) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScalewayManagedMachinePoolList) DeepCopyInto(out *ScalewayManagedMachinePoolList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ScalewayManagedMachinePool, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScalewayManagedMachinePoolList. +func (in *ScalewayManagedMachinePoolList) DeepCopy() *ScalewayManagedMachinePoolList { + if in == nil { + return nil + } + out := new(ScalewayManagedMachinePoolList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ScalewayManagedMachinePoolList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScalewayManagedMachinePoolSpec) DeepCopyInto(out *ScalewayManagedMachinePoolSpec) { + *out = *in + if in.PlacementGroupID != nil { + in, out := &in.PlacementGroupID, &out.PlacementGroupID + *out = new(string) + **out = **in + } + if in.Scaling != nil { + in, out := &in.Scaling, &out.Scaling + *out = new(ScalingSpec) + (*in).DeepCopyInto(*out) + } + if in.Autohealing != nil { + in, out := &in.Autohealing, &out.Autohealing + *out = new(bool) + **out = **in + } + if in.AdditionalTags != nil { + in, out := &in.AdditionalTags, &out.AdditionalTags + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.KubeletArgs != nil { + in, out := &in.KubeletArgs, &out.KubeletArgs + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.UpgradePolicy != nil { + in, out := &in.UpgradePolicy, &out.UpgradePolicy + *out = new(UpgradePolicySpec) + (*in).DeepCopyInto(*out) + } + if in.RootVolumeType != nil { + in, out := &in.RootVolumeType, &out.RootVolumeType + *out = new(string) + **out = **in + } + if in.RootVolumeSizeGB != nil { + in, out := &in.RootVolumeSizeGB, &out.RootVolumeSizeGB + *out = new(int64) + **out = **in + } + if in.PublicIPDisabled != nil { + in, out := &in.PublicIPDisabled, &out.PublicIPDisabled + *out = new(bool) + **out = **in + } + if in.SecurityGroupID != nil { + in, out := &in.SecurityGroupID, &out.SecurityGroupID + *out = new(string) + **out = **in + } + if in.ProviderIDList != nil { + in, out := &in.ProviderIDList, &out.ProviderIDList + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScalewayManagedMachinePoolSpec. +func (in *ScalewayManagedMachinePoolSpec) DeepCopy() *ScalewayManagedMachinePoolSpec { + if in == nil { + return nil + } + out := new(ScalewayManagedMachinePoolSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ScalewayManagedMachinePoolStatus) DeepCopyInto(out *ScalewayManagedMachinePoolStatus) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScalewayManagedMachinePoolStatus. +func (in *ScalewayManagedMachinePoolStatus) DeepCopy() *ScalewayManagedMachinePoolStatus { + if in == nil { + return nil + } + out := new(ScalewayManagedMachinePoolStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScalingSpec) DeepCopyInto(out *ScalingSpec) { + *out = *in + if in.Autoscaling != nil { + in, out := &in.Autoscaling, &out.Autoscaling + *out = new(bool) + **out = **in + } + if in.MinSize != nil { + in, out := &in.MinSize, &out.MinSize + *out = new(int32) + **out = **in + } + if in.MaxSize != nil { + in, out := &in.MaxSize, &out.MaxSize + *out = new(int32) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScalingSpec. +func (in *ScalingSpec) DeepCopy() *ScalingSpec { + if in == nil { + return nil + } + out := new(ScalingSpec) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *SecurityGroupSpec) DeepCopyInto(out *SecurityGroupSpec) { *out = *in @@ -797,3 +1481,28 @@ func (in *SecurityGroupSpec) DeepCopy() *SecurityGroupSpec { in.DeepCopyInto(out) return out } + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UpgradePolicySpec) DeepCopyInto(out *UpgradePolicySpec) { + *out = *in + if in.MaxUnavailable != nil { + in, out := &in.MaxUnavailable, &out.MaxUnavailable + *out = new(int32) + **out = **in + } + if in.MaxSurge != nil { + in, out := &in.MaxSurge, &out.MaxSurge + *out = new(int32) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UpgradePolicySpec. +func (in *UpgradePolicySpec) DeepCopy() *UpgradePolicySpec { + if in == nil { + return nil + } + out := new(UpgradePolicySpec) + in.DeepCopyInto(out) + return out +} diff --git a/cmd/main.go b/cmd/main.go index 48d473a..19000d9 100644 --- a/cmd/main.go +++ b/cmd/main.go @@ -11,11 +11,11 @@ import ( // to ensure that exec-entrypoint and run can make use of them. 
_ "k8s.io/client-go/plugin/pkg/client/auth" - internalVersion "github.com/scaleway/cluster-api-provider-scaleway/internal/version" "k8s.io/apimachinery/pkg/runtime" utilruntime "k8s.io/apimachinery/pkg/util/runtime" clientgoscheme "k8s.io/client-go/kubernetes/scheme" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + expclusterv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/certwatcher" "sigs.k8s.io/controller-runtime/pkg/healthz" @@ -26,6 +26,7 @@ import ( infrav1 "github.com/scaleway/cluster-api-provider-scaleway/api/v1alpha1" "github.com/scaleway/cluster-api-provider-scaleway/internal/controller" + internalVersion "github.com/scaleway/cluster-api-provider-scaleway/internal/version" // +kubebuilder:scaffold:imports ) @@ -37,6 +38,7 @@ var ( func init() { utilruntime.Must(clientgoscheme.AddToScheme(scheme)) utilruntime.Must(clusterv1.AddToScheme(scheme)) + utilruntime.Must(expclusterv1.AddToScheme(scheme)) utilruntime.Must(infrav1.AddToScheme(scheme)) // +kubebuilder:scaffold:scheme @@ -212,6 +214,18 @@ func main() { setupLog.Error(err, "unable to create controller", "controller", "ScalewayMachine") os.Exit(1) } + if err := controller.NewScalewayManagedClusterReconciler(mgr.GetClient()).SetupWithManager(ctx, mgr); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "ScalewayManagedCluster") + os.Exit(1) + } + if err := controller.NewScalewayManagedControlPlaneReconciler(mgr.GetClient()).SetupWithManager(ctx, mgr); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "ScalewayManagedControlPlane") + os.Exit(1) + } + if err := controller.NewScalewayManagedMachinePoolReconciler(mgr.GetClient()).SetupWithManager(ctx, mgr); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "ScalewayManagedMachinePool") + os.Exit(1) + } // +kubebuilder:scaffold:builder if metricsCertWatcher != nil { diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_scalewayclusters.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_scalewayclusters.yaml index 33785be..c7a6ce1 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_scalewayclusters.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_scalewayclusters.yaml @@ -330,7 +330,7 @@ spec: scalewaySecretName: description: |- ScalewaySecretName is the name of the secret that contains the Scaleway client parameters. - The following keys are required: SCW_ACCESS_KEY, SCW_SECRET_KEY, SCW_DEFAULT_PROJECT_ID. + The following keys are required: SCW_ACCESS_KEY, SCW_SECRET_KEY. The following key is optional: SCW_API_URL. type: string required: diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_scalewayclustertemplates.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_scalewayclustertemplates.yaml index 27e97e8..2bc5fbb 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_scalewayclustertemplates.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_scalewayclustertemplates.yaml @@ -332,7 +332,7 @@ spec: scalewaySecretName: description: |- ScalewaySecretName is the name of the secret that contains the Scaleway client parameters. - The following keys are required: SCW_ACCESS_KEY, SCW_SECRET_KEY, SCW_DEFAULT_PROJECT_ID. + The following keys are required: SCW_ACCESS_KEY, SCW_SECRET_KEY. The following key is optional: SCW_API_URL. 
type: string required: diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_scalewaymanagedclusters.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_scalewaymanagedclusters.yaml new file mode 100644 index 0000000..7ff66bb --- /dev/null +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_scalewaymanagedclusters.yaml @@ -0,0 +1,215 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.18.0 + name: scalewaymanagedclusters.infrastructure.cluster.x-k8s.io +spec: + group: infrastructure.cluster.x-k8s.io + names: + categories: + - cluster-api + kind: ScalewayManagedCluster + listKind: ScalewayManagedClusterList + plural: scalewaymanagedclusters + shortNames: + - smc + singular: scalewaymanagedcluster + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: Cluster to which this ScalewayManagedCluster belongs + jsonPath: .metadata.labels.cluster\.x-k8s\.io/cluster-name + name: Cluster + type: string + - description: Ready is true when the managed cluster is fully provisioned + jsonPath: .status.ready + name: Ready + type: boolean + - description: Region of the managed cluster + jsonPath: .spec.region + name: Region + type: string + - description: Host of the control plane + jsonPath: .spec.controlPlaneEndpoint.host + name: Host + type: string + - description: Port of the control plane + jsonPath: .spec.controlPlaneEndpoint.port + name: Port + type: integer + name: v1alpha1 + schema: + openAPIV3Schema: + description: ScalewayManagedCluster is the Schema for the scalewaymanagedclusters + API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: spec defines the desired state of ScalewayManagedCluster + properties: + controlPlaneEndpoint: + description: ControlPlaneEndpoint represents the endpoint used to + communicate with the control plane. + properties: + host: + description: host is the hostname on which the API server is serving. + maxLength: 512 + type: string + port: + description: port is the port on which the API server is serving. + format: int32 + type: integer + required: + - host + - port + type: object + x-kubernetes-validations: + - message: Value is immutable + rule: self == oldSelf + network: + description: Network defines the network configuration of the managed + cluster. + properties: + privateNetwork: + description: PrivateNetwork allows attaching machines of the cluster + to a Private Network. + properties: + id: + description: Set a Private Network ID to reuse an existing + Private Network. + type: string + x-kubernetes-validations: + - message: Value is immutable + rule: self == oldSelf + subnet: + description: Optional subnet for the Private Network. Only + used on newly created Private Networks. 
+ type: string + x-kubernetes-validations: + - message: Value is immutable + rule: self == oldSelf + vpcID: + description: Set the VPC ID where the new Private Network + will be created. + type: string + x-kubernetes-validations: + - message: Value is immutable + rule: self == oldSelf + type: object + x-kubernetes-validations: + - message: vpcID cannot be added or removed + rule: has(self.vpcID) == has(oldSelf.vpcID) + - message: id cannot be added or removed + rule: has(self.id) == has(oldSelf.id) + - message: subnet cannot be added or removed + rule: has(self.subnet) == has(oldSelf.subnet) + - message: subnet cannot be set when id is set + rule: has(self.id) && !has(self.subnet) || !has(self.id) + - message: vpcID cannot be set when id is set + rule: has(self.id) && !has(self.vpcID) || !has(self.id) + publicGateways: + description: |- + PublicGateways allows to create Public Gateways that will be attached to the + Private Network of the cluster. + items: + description: PublicGatewaySpec defines Public Gateway settings + for the cluster. + properties: + ip: + description: IP to use when creating a Public Gateway. + format: ipv4 + type: string + type: + default: VPC-GW-S + description: Public Gateway commercial offer type. + type: string + zone: + description: |- + Zone where to create the Public Gateway. Must be in the same region as the + cluster. Defaults to the first zone of the region. + type: string + type: object + maxItems: 6 + type: array + type: object + projectID: + description: ProjectID in which the managed cluster will be created. + minLength: 2 + type: string + x-kubernetes-validations: + - message: Value is immutable + rule: self == oldSelf + region: + description: Region where the managed cluster will be created. + minLength: 2 + type: string + x-kubernetes-validations: + - message: Value is immutable + rule: self == oldSelf + scalewaySecretName: + description: |- + ScalewaySecretName is the name of the secret that contains the Scaleway client parameters. + The following keys are required: SCW_ACCESS_KEY, SCW_SECRET_KEY. + The following key is optional: SCW_API_URL. + minLength: 1 + type: string + required: + - projectID + - region + - scalewaySecretName + type: object + x-kubernetes-validations: + - message: controlPlaneEndpoint is required once set + rule: '!has(oldSelf.controlPlaneEndpoint) || has(self.controlPlaneEndpoint)' + - message: privateNetwork cannot be added or removed + rule: (has(self.network) && has(self.network.privateNetwork)) == (has(oldSelf.network) + && has(oldSelf.network.privateNetwork)) + status: + description: status defines the observed state of ScalewayManagedCluster + properties: + network: + description: Network contains information about currently provisioned + network resources. + properties: + privateNetworkID: + description: PrivateNetworkID is the ID of the Private Network + that is attached to the cluster. + type: string + type: object + ready: + description: |- + Ready denotes that the Scaleway managed cluster infrastructure is fully provisioned. + NOTE: this field is part of the Cluster API contract and it is used to orchestrate provisioning. + The value of this field is never updated after provisioning is completed. 
+ type: boolean + type: object + required: + - spec + type: object + x-kubernetes-validations: + - message: name must be between 1 and 63 characters + rule: self.metadata.name.size() <= 63 + - message: name must be a valid DNS label + rule: self.metadata.name.matches('^[a-z0-9]([-a-z0-9]*[a-z0-9])?$') + served: true + storage: true + subresources: + status: {} diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_scalewaymanagedcontrolplanes.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_scalewaymanagedcontrolplanes.yaml new file mode 100644 index 0000000..8c411aa --- /dev/null +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_scalewaymanagedcontrolplanes.yaml @@ -0,0 +1,361 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.18.0 + name: scalewaymanagedcontrolplanes.infrastructure.cluster.x-k8s.io +spec: + group: infrastructure.cluster.x-k8s.io + names: + categories: + - cluster-api + kind: ScalewayManagedControlPlane + listKind: ScalewayManagedControlPlaneList + plural: scalewaymanagedcontrolplanes + shortNames: + - smcp + singular: scalewaymanagedcontrolplane + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: Cluster to which this ScalewayManagedControlPlane belongs + jsonPath: .metadata.labels.cluster\.x-k8s\.io/cluster-name + name: Cluster + type: string + - description: Ready is true when the managed cluster is fully provisioned + jsonPath: .status.ready + name: Ready + type: boolean + - description: The Kubernetes version of the Scaleway control plane + jsonPath: .status.version + name: Version + type: string + - description: Host of the control plane + jsonPath: .spec.controlPlaneEndpoint.host + name: Host + type: string + - description: Port of the control plane + jsonPath: .spec.controlPlaneEndpoint.port + name: Port + type: integer + name: v1alpha1 + schema: + openAPIV3Schema: + description: ScalewayManagedControlPlane is the Schema for the scalewaymanagedcontrolplanes + API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: spec defines the desired state of ScalewayManagedControlPlane + properties: + acl: + description: |- + ACLSpec configures the ACLs of the managed cluster. If not set, ACLs + will be set to 0.0.0.0/0. + properties: + allowedRanges: + description: |- + AllowedRanges allows to set a list of allowed public IP ranges that can access + the managed cluster. When empty, all IP ranges are DENIED. Make sure the nodes + of your management cluster can still access the cluster by allowing their IPs. + items: + description: CIDR is an IP address range in CIDR notation (for + example, "10.0.0.0/8" or "fd00::/8"). 
+ maxLength: 43 + minLength: 1 + type: string + x-kubernetes-validations: + - message: value must be a valid CIDR network address + rule: isCIDR(self) + maxItems: 30 + type: array + x-kubernetes-list-type: set + type: object + additionalTags: + description: AdditionalTags that will be added to the default tags. + items: + type: string + type: array + admissionPlugins: + description: Admission plugins to enable. + items: + type: string + type: array + apiServerCertSANs: + description: |- + APIServerCertSANs defines additional Subject Alternative Names for the + Kubernetes API server certificate. + items: + type: string + type: array + autoUpgrade: + description: AutoUpgrade configuration of the cluster. + properties: + enabled: + description: Defines whether auto upgrade is enabled for the cluster. + type: boolean + maintenanceWindow: + description: Maintenance window of the cluster auto upgrades. + properties: + day: + description: Day of the week for the maintenance window. + enum: + - any + - monday + - tuesday + - wednesday + - thursday + - friday + - saturday + - sunday + type: string + startHour: + description: Start time of the two-hour maintenance window. + format: int32 + type: integer + type: object + required: + - enabled + type: object + autoscaler: + description: Autoscaler configuration of the cluster. + properties: + balanceSimilarNodeGroups: + description: Detect similar node groups and balance the number + of nodes between them. + type: boolean + estimator: + description: Type of resource estimator to be used in scale up. + enum: + - binpacking + type: string + expander: + description: Type of node group expander to be used in scale up. + enum: + - random + - most_pods + - least_waste + - priority + - price + type: string + expendablePodsPriorityCutoff: + description: |- + Pods with priority below cutoff will be expendable. They can be killed without + any consideration during scale down and they won't cause scale up. + Pods with null priority (PodPriority disabled) are non expendable. + format: int32 + type: integer + ignoreDaemonsetsUtilization: + description: Ignore DaemonSet pods when calculating resource utilization + for scaling down. + type: boolean + maxGracefulTerminationSec: + description: |- + Maximum number of seconds the cluster autoscaler waits for pod termination + when trying to scale down a node. + format: int32 + type: integer + scaleDownDelayAfterAdd: + description: How long after scale up the scale down evaluation + resumes. + type: string + scaleDownDisabled: + description: Disable the cluster autoscaler. + type: boolean + scaleDownUnneededTime: + description: How long a node should be unneeded before it is eligible + to be scaled down. + type: string + scaleDownUtilizationThreshold: + description: |- + Node utilization level, defined as a sum of requested resources divided + by capacity, below which a node can be considered for scale down. + format: float + type: string + type: object + clusterName: + description: |- + ClusterName allows you to specify the name of the Scaleway managed cluster. + If you don't specify a name then a default name will be created + based on the namespace and name of the managed control plane. + maxLength: 100 + type: string + x-kubernetes-validations: + - message: Value is immutable + rule: self == oldSelf + cni: + description: CNI plugin running in the cluster. 
+ enum: + - cilium + - calico + - kilo + - none + type: string + x-kubernetes-validations: + - message: Value is immutable + rule: self == oldSelf + controlPlaneEndpoint: + description: ControlPlaneEndpoint represents the endpoint used to + communicate with the control plane. + properties: + host: + description: host is the hostname on which the API server is serving. + maxLength: 512 + type: string + port: + description: port is the port on which the API server is serving. + format: int32 + type: integer + required: + - host + - port + type: object + x-kubernetes-validations: + - message: Value is immutable + rule: self == oldSelf + enablePrivateEndpoint: + description: |- + EnablePrivateEndpoint defines whether the apiserver's internal address + is used as the cluster endpoint. + type: boolean + x-kubernetes-validations: + - message: Value is immutable + rule: self == oldSelf + featureGates: + description: Feature gates to enable. + items: + type: string + type: array + onDelete: + description: OnDelete configures the settings to apply when deleting + the Scaleway managed cluster. + properties: + withAdditionalResources: + description: |- + WithAdditionalResources allows to also automatically delete all volumes + (including those with volume type "retain"), empty Private Networks and + Load Balancers whose names start with cluster ID. + type: boolean + type: object + openIDConnect: + description: OpenIDConnect defines the OpenID Connect configuration + of the Kubernetes API server. + properties: + clientID: + description: A client ID that all tokens must be issued for. + type: string + groupsClaim: + description: JWT claim to use as the user's group. + items: + type: string + type: array + groupsPrefix: + description: |- + Prefix prepended to group claims to prevent name collision (such as "system:" groups). + For example, the value "oidc:" will create group names like "oidc:engineering" and "oidc:infra". + type: string + issuerURL: + description: |- + URL of the provider which allows the API server to discover public signing keys. + Only URLs using the https:// scheme are accepted. This is typically the provider's + discovery URL without a path, for example "https://accounts.google.com" or "https://login.salesforce.com". + type: string + requiredClaim: + description: |- + Multiple key=value pairs describing a required claim in the ID token. If set, + the claims are verified to be present in the ID token with a matching value. + items: + type: string + type: array + usernameClaim: + description: |- + JWT claim to use as the user name. The default is "sub", which is expected + to be the end user's unique identifier. Admins can choose other claims, + such as email or name, depending on their provider. However, claims other + than email will be prefixed with the issuer URL to prevent name collision. + type: string + usernamePrefix: + description: |- + Prefix prepended to username claims to prevent name collision (such as "system:" users). + For example, the value "oidc:"" will create usernames like "oidc:jane.doe". + If this flag is not provided and "username_claim" is a value other than email, + the prefix defaults to "( Issuer URL )#" where "( Issuer URL )" is the value of "issuer_url". + The value "-" can be used to disable all prefixing. + type: string + required: + - clientID + - issuerURL + type: object + type: + default: kapsule + description: Type of the cluster (e.g. kapsule, multicloud, etc.). + type: string + version: + description: Version defines the desired Kubernetes version. 
+ minLength: 2 + type: string + required: + - type + - version + type: object + x-kubernetes-validations: + - message: controlPlaneEndpoint is required once set + rule: '!has(oldSelf.controlPlaneEndpoint) || has(self.controlPlaneEndpoint)' + - message: clusterName cannot be removed once set + rule: '!has(oldSelf.clusterName) || has(self.clusterName) == has(oldSelf.clusterName)' + - message: cni cannot be added or removed + rule: has(self.cni) == has(oldSelf.cni) + - message: enablePrivateEndpoint cannot be added or removed + rule: has(self.enablePrivateEndpoint) == has(oldSelf.enablePrivateEndpoint) + status: + description: status defines the observed state of ScalewayManagedControlPlane + properties: + externalManagedControlPlane: + default: true + description: |- + ExternalManagedControlPlane is a bool that should be set to true if the + Node objects do not exist in the cluster. + type: boolean + initialized: + description: |- + Initialized is true when the control plane is available for initial contact. + This may occur before the control plane is fully ready. + In the AzureManagedControlPlane implementation, these are identical. + type: boolean + ready: + description: Ready is true when the provider resource is ready. + type: boolean + version: + description: Version represents the version of the Scaleway managed + control plane. + type: string + type: object + required: + - spec + type: object + x-kubernetes-validations: + - message: name must be between 1 and 63 characters + rule: self.metadata.name.size() <= 63 + - message: name must be a valid DNS label + rule: self.metadata.name.matches('^[a-z0-9]([-a-z0-9]*[a-z0-9])?$') + served: true + storage: true + subresources: + status: {} diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_scalewaymanagedmachinepools.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_scalewaymanagedmachinepools.yaml new file mode 100644 index 0000000..77c3c5b --- /dev/null +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_scalewaymanagedmachinepools.yaml @@ -0,0 +1,216 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.18.0 + name: scalewaymanagedmachinepools.infrastructure.cluster.x-k8s.io +spec: + group: infrastructure.cluster.x-k8s.io + names: + categories: + - cluster-api + kind: ScalewayManagedMachinePool + listKind: ScalewayManagedMachinePoolList + plural: scalewaymanagedmachinepools + shortNames: + - smmp + singular: scalewaymanagedmachinepool + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .status.ready + name: Ready + type: string + - jsonPath: .status.replicas + name: Replicas + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + description: ScalewayManagedMachinePool is the Schema for the scalewaymanagedmachinepools + API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: spec defines the desired state of ScalewayManagedMachinePool + properties: + additionalTags: + description: AdditionalTags that will be added to the default tags. + items: + type: string + type: array + autohealing: + description: Autohealing defines whether the autohealing feature is + enabled for the pool. + type: boolean + kubeletArgs: + additionalProperties: + type: string + description: KubeletArgs defines Kubelet arguments to be used by this + pool. + type: object + nodeType: + description: |- + NodeType is the type of Scaleway Instance wanted for the pool. Nodes with + insufficient memory are not eligible (DEV1-S, PLAY2-PICO, STARDUST). + "external" is a special node type used to provision instances from other + cloud providers in a Kosmos Cluster. + minLength: 2 + type: string + x-kubernetes-validations: + - message: Value is immutable + rule: self == oldSelf + placementGroupID: + description: |- + PlacementGroupID in which all the nodes of the pool will be created, + placement groups are limited to 20 instances. + type: string + x-kubernetes-validations: + - message: Value is immutable + rule: self == oldSelf + providerIDList: + description: |- + ProviderIDList are the provider IDs of instances in the + managed instance group corresponding to the nodegroup represented by this + machine pool + items: + type: string + type: array + publicIPDisabled: + description: |- + PublicIPDisabled defines if the public IP should be removed from Nodes. + To use this feature, your Cluster must have an attached Private Network + set up with a Public Gateway. + type: boolean + x-kubernetes-validations: + - message: Value is immutable + rule: self == oldSelf + rootVolumeSizeGB: + description: RootVolumeSizeGB is the size of the System volume disk + size, in GB. + format: int64 + type: integer + x-kubernetes-validations: + - message: Value is immutable + rule: self == oldSelf + rootVolumeType: + description: RootVolumeType is the system volume disk type. + enum: + - l_ssd + - sbs_5k + - sbs_15k + type: string + x-kubernetes-validations: + - message: Value is immutable + rule: self == oldSelf + scaling: + description: Scaling configures the scaling of the pool. + properties: + autoscaling: + description: Autoscaling defines whether the autoscaling feature + is enabled for the pool. + type: boolean + maxSize: + description: |- + MaxSize defines the maximum size of the pool. Note that this field is only + used when autoscaling is enabled on the pool. + format: int32 + type: integer + minSize: + description: |- + MinSize defines the minimum size of the pool. Note that this field is only + used when autoscaling is enabled on the pool. + format: int32 + type: integer + type: object + securityGroupID: + description: |- + SecurityGroupID in which all the nodes of the pool will be created. If unset, + the pool will use default Kapsule security group in current zone. + type: string + x-kubernetes-validations: + - message: Value is immutable + rule: self == oldSelf + upgradePolicy: + description: UpgradePolicy defines the pool's upgrade policy. + properties: + maxSurge: + description: |- + MaxSurge is the maximum number of additional nodes that can be provisioned + during upgrades. + format: int32 + minimum: 0 + type: integer + maxUnavailable: + description: MaxUnavailable is the maximum number of available + nodes during upgrades. 
+ format: int32 + minimum: 0 + type: integer + type: object + zone: + description: Zone in which the pool's nodes will be spawned. + minLength: 2 + type: string + x-kubernetes-validations: + - message: Value is immutable + rule: self == oldSelf + required: + - nodeType + - zone + type: object + x-kubernetes-validations: + - message: placementGroupID cannot be added or removed + rule: has(self.placementGroupID) == has(oldSelf.placementGroupID) + - message: rootVolumeType cannot be added or removed + rule: has(self.rootVolumeType) == has(oldSelf.rootVolumeType) + - message: rootVolumeSizeGB cannot be added or removed + rule: has(self.rootVolumeSizeGB) == has(oldSelf.rootVolumeSizeGB) + - message: publicIPDisabled cannot be added or removed + rule: has(self.publicIPDisabled) == has(oldSelf.publicIPDisabled) + - message: securityGroupID cannot be added or removed + rule: has(self.securityGroupID) == has(oldSelf.securityGroupID) + status: + description: status defines the observed state of ScalewayManagedMachinePool + properties: + ready: + default: false + description: Ready denotes that the ScalewayManagedMachinePool has + joined the cluster + type: boolean + replicas: + description: Replicas is the most recently observed number of replicas. + format: int32 + type: integer + required: + - ready + type: object + required: + - spec + type: object + x-kubernetes-validations: + - message: name must be between 1 and 63 characters + rule: self.metadata.name.size() <= 63 + - message: name must be a valid DNS label + rule: self.metadata.name.matches('^[a-z0-9]([-a-z0-9]*[a-z0-9])?$') + served: true + storage: true + subresources: + status: {} diff --git a/config/crd/kustomization.yaml b/config/crd/kustomization.yaml index dcd3c5d..50e048f 100644 --- a/config/crd/kustomization.yaml +++ b/config/crd/kustomization.yaml @@ -9,6 +9,9 @@ resources: - bases/infrastructure.cluster.x-k8s.io_scalewaymachines.yaml - bases/infrastructure.cluster.x-k8s.io_scalewayclustertemplates.yaml - bases/infrastructure.cluster.x-k8s.io_scalewaymachinetemplates.yaml +- bases/infrastructure.cluster.x-k8s.io_scalewaymanagedclusters.yaml +- bases/infrastructure.cluster.x-k8s.io_scalewaymanagedcontrolplanes.yaml +- bases/infrastructure.cluster.x-k8s.io_scalewaymanagedmachinepools.yaml # +kubebuilder:scaffold:crdkustomizeresource patches: diff --git a/config/rbac/kustomization.yaml b/config/rbac/kustomization.yaml index 770507a..878ffdf 100644 --- a/config/rbac/kustomization.yaml +++ b/config/rbac/kustomization.yaml @@ -20,7 +20,7 @@ resources: - metrics_reader_role.yaml # For each CRD, "Admin", "Editor" and "Viewer" roles are scaffolded by # default, aiding admins in cluster management. Those roles are -# not used by the {{ .ProjectName }} itself. You can comment the following lines +# not used by the cluster-api-provider-scaleway itself. You can comment the following lines # if you do not want those helpers be installed with your Project. 
- scalewaymachinetemplate_admin_role.yaml - scalewaymachinetemplate_editor_role.yaml @@ -34,4 +34,12 @@ resources: - scalewaycluster_admin_role.yaml - scalewaycluster_editor_role.yaml - scalewaycluster_viewer_role.yaml - +- scalewaymanagedmachinepool_admin_role.yaml +- scalewaymanagedmachinepool_editor_role.yaml +- scalewaymanagedmachinepool_viewer_role.yaml +- scalewaymanagedcontrolplane_admin_role.yaml +- scalewaymanagedcontrolplane_editor_role.yaml +- scalewaymanagedcontrolplane_viewer_role.yaml +- scalewaymanagedcluster_admin_role.yaml +- scalewaymanagedcluster_editor_role.yaml +- scalewaymanagedcluster_viewer_role.yaml diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index 1406efb..1a83336 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -9,6 +9,8 @@ rules: resources: - secrets verbs: + - create + - delete - get - list - patch @@ -19,6 +21,8 @@ rules: resources: - clusters - clusters/status + - machinepools + - machinepools/status - machines - machines/status verbs: @@ -30,6 +34,9 @@ rules: resources: - scalewayclusters - scalewaymachines + - scalewaymanagedclusters + - scalewaymanagedcontrolplanes + - scalewaymanagedmachinepools verbs: - create - delete @@ -43,6 +50,9 @@ rules: resources: - scalewayclusters/finalizers - scalewaymachines/finalizers + - scalewaymanagedclusters/finalizers + - scalewaymanagedcontrolplanes/finalizers + - scalewaymanagedmachinepools/finalizers verbs: - update - apiGroups: @@ -50,6 +60,9 @@ rules: resources: - scalewayclusters/status - scalewaymachines/status + - scalewaymanagedclusters/status + - scalewaymanagedcontrolplanes/status + - scalewaymanagedmachinepools/status verbs: - get - patch diff --git a/config/rbac/scalewaymanagedcluster_admin_role.yaml b/config/rbac/scalewaymanagedcluster_admin_role.yaml new file mode 100644 index 0000000..a0b29a7 --- /dev/null +++ b/config/rbac/scalewaymanagedcluster_admin_role.yaml @@ -0,0 +1,27 @@ +# This rule is not used by the project cluster-api-provider-scaleway itself. +# It is provided to allow the cluster admin to help manage permissions for users. +# +# Grants full permissions ('*') over infrastructure.cluster.x-k8s.io. +# This role is intended for users authorized to modify roles and bindings within the cluster, +# enabling them to delegate specific permissions to other users or groups as needed. + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: cluster-api-provider-scaleway + app.kubernetes.io/managed-by: kustomize + name: scalewaymanagedcluster-admin-role +rules: +- apiGroups: + - infrastructure.cluster.x-k8s.io + resources: + - scalewaymanagedclusters + verbs: + - '*' +- apiGroups: + - infrastructure.cluster.x-k8s.io + resources: + - scalewaymanagedclusters/status + verbs: + - get diff --git a/config/rbac/scalewaymanagedcluster_editor_role.yaml b/config/rbac/scalewaymanagedcluster_editor_role.yaml new file mode 100644 index 0000000..139de25 --- /dev/null +++ b/config/rbac/scalewaymanagedcluster_editor_role.yaml @@ -0,0 +1,33 @@ +# This rule is not used by the project cluster-api-provider-scaleway itself. +# It is provided to allow the cluster admin to help manage permissions for users. +# +# Grants permissions to create, update, and delete resources within the infrastructure.cluster.x-k8s.io. +# This role is intended for users who need to manage these resources +# but should not control RBAC or manage permissions for others. 
+ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: cluster-api-provider-scaleway + app.kubernetes.io/managed-by: kustomize + name: scalewaymanagedcluster-editor-role +rules: +- apiGroups: + - infrastructure.cluster.x-k8s.io + resources: + - scalewaymanagedclusters + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - infrastructure.cluster.x-k8s.io + resources: + - scalewaymanagedclusters/status + verbs: + - get diff --git a/config/rbac/scalewaymanagedcluster_viewer_role.yaml b/config/rbac/scalewaymanagedcluster_viewer_role.yaml new file mode 100644 index 0000000..5ec4bde --- /dev/null +++ b/config/rbac/scalewaymanagedcluster_viewer_role.yaml @@ -0,0 +1,29 @@ +# This rule is not used by the project cluster-api-provider-scaleway itself. +# It is provided to allow the cluster admin to help manage permissions for users. +# +# Grants read-only access to infrastructure.cluster.x-k8s.io resources. +# This role is intended for users who need visibility into these resources +# without permissions to modify them. It is ideal for monitoring purposes and limited-access viewing. + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: cluster-api-provider-scaleway + app.kubernetes.io/managed-by: kustomize + name: scalewaymanagedcluster-viewer-role +rules: +- apiGroups: + - infrastructure.cluster.x-k8s.io + resources: + - scalewaymanagedclusters + verbs: + - get + - list + - watch +- apiGroups: + - infrastructure.cluster.x-k8s.io + resources: + - scalewaymanagedclusters/status + verbs: + - get diff --git a/config/rbac/scalewaymanagedcontrolplane_admin_role.yaml b/config/rbac/scalewaymanagedcontrolplane_admin_role.yaml new file mode 100644 index 0000000..a086717 --- /dev/null +++ b/config/rbac/scalewaymanagedcontrolplane_admin_role.yaml @@ -0,0 +1,27 @@ +# This rule is not used by the project cluster-api-provider-scaleway itself. +# It is provided to allow the cluster admin to help manage permissions for users. +# +# Grants full permissions ('*') over infrastructure.cluster.x-k8s.io. +# This role is intended for users authorized to modify roles and bindings within the cluster, +# enabling them to delegate specific permissions to other users or groups as needed. + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: cluster-api-provider-scaleway + app.kubernetes.io/managed-by: kustomize + name: scalewaymanagedcontrolplane-admin-role +rules: +- apiGroups: + - infrastructure.cluster.x-k8s.io + resources: + - scalewaymanagedcontrolplanes + verbs: + - '*' +- apiGroups: + - infrastructure.cluster.x-k8s.io + resources: + - scalewaymanagedcontrolplanes/status + verbs: + - get diff --git a/config/rbac/scalewaymanagedcontrolplane_editor_role.yaml b/config/rbac/scalewaymanagedcontrolplane_editor_role.yaml new file mode 100644 index 0000000..2b8f086 --- /dev/null +++ b/config/rbac/scalewaymanagedcontrolplane_editor_role.yaml @@ -0,0 +1,33 @@ +# This rule is not used by the project cluster-api-provider-scaleway itself. +# It is provided to allow the cluster admin to help manage permissions for users. +# +# Grants permissions to create, update, and delete resources within the infrastructure.cluster.x-k8s.io. +# This role is intended for users who need to manage these resources +# but should not control RBAC or manage permissions for others. 
+ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: cluster-api-provider-scaleway + app.kubernetes.io/managed-by: kustomize + name: scalewaymanagedcontrolplane-editor-role +rules: +- apiGroups: + - infrastructure.cluster.x-k8s.io + resources: + - scalewaymanagedcontrolplanes + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - infrastructure.cluster.x-k8s.io + resources: + - scalewaymanagedcontrolplanes/status + verbs: + - get diff --git a/config/rbac/scalewaymanagedcontrolplane_viewer_role.yaml b/config/rbac/scalewaymanagedcontrolplane_viewer_role.yaml new file mode 100644 index 0000000..9c82aec --- /dev/null +++ b/config/rbac/scalewaymanagedcontrolplane_viewer_role.yaml @@ -0,0 +1,29 @@ +# This rule is not used by the project cluster-api-provider-scaleway itself. +# It is provided to allow the cluster admin to help manage permissions for users. +# +# Grants read-only access to infrastructure.cluster.x-k8s.io resources. +# This role is intended for users who need visibility into these resources +# without permissions to modify them. It is ideal for monitoring purposes and limited-access viewing. + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: cluster-api-provider-scaleway + app.kubernetes.io/managed-by: kustomize + name: scalewaymanagedcontrolplane-viewer-role +rules: +- apiGroups: + - infrastructure.cluster.x-k8s.io + resources: + - scalewaymanagedcontrolplanes + verbs: + - get + - list + - watch +- apiGroups: + - infrastructure.cluster.x-k8s.io + resources: + - scalewaymanagedcontrolplanes/status + verbs: + - get diff --git a/config/rbac/scalewaymanagedmachinepool_admin_role.yaml b/config/rbac/scalewaymanagedmachinepool_admin_role.yaml new file mode 100644 index 0000000..3ecc64f --- /dev/null +++ b/config/rbac/scalewaymanagedmachinepool_admin_role.yaml @@ -0,0 +1,27 @@ +# This rule is not used by the project cluster-api-provider-scaleway itself. +# It is provided to allow the cluster admin to help manage permissions for users. +# +# Grants full permissions ('*') over infrastructure.cluster.x-k8s.io. +# This role is intended for users authorized to modify roles and bindings within the cluster, +# enabling them to delegate specific permissions to other users or groups as needed. + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: cluster-api-provider-scaleway + app.kubernetes.io/managed-by: kustomize + name: scalewaymanagedmachinepool-admin-role +rules: +- apiGroups: + - infrastructure.cluster.x-k8s.io + resources: + - scalewaymanagedmachinepools + verbs: + - '*' +- apiGroups: + - infrastructure.cluster.x-k8s.io + resources: + - scalewaymanagedmachinepools/status + verbs: + - get diff --git a/config/rbac/scalewaymanagedmachinepool_editor_role.yaml b/config/rbac/scalewaymanagedmachinepool_editor_role.yaml new file mode 100644 index 0000000..abaf4b2 --- /dev/null +++ b/config/rbac/scalewaymanagedmachinepool_editor_role.yaml @@ -0,0 +1,33 @@ +# This rule is not used by the project cluster-api-provider-scaleway itself. +# It is provided to allow the cluster admin to help manage permissions for users. +# +# Grants permissions to create, update, and delete resources within the infrastructure.cluster.x-k8s.io. +# This role is intended for users who need to manage these resources +# but should not control RBAC or manage permissions for others. 
+ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: cluster-api-provider-scaleway + app.kubernetes.io/managed-by: kustomize + name: scalewaymanagedmachinepool-editor-role +rules: +- apiGroups: + - infrastructure.cluster.x-k8s.io + resources: + - scalewaymanagedmachinepools + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - infrastructure.cluster.x-k8s.io + resources: + - scalewaymanagedmachinepools/status + verbs: + - get diff --git a/config/rbac/scalewaymanagedmachinepool_viewer_role.yaml b/config/rbac/scalewaymanagedmachinepool_viewer_role.yaml new file mode 100644 index 0000000..4070e13 --- /dev/null +++ b/config/rbac/scalewaymanagedmachinepool_viewer_role.yaml @@ -0,0 +1,29 @@ +# This rule is not used by the project cluster-api-provider-scaleway itself. +# It is provided to allow the cluster admin to help manage permissions for users. +# +# Grants read-only access to infrastructure.cluster.x-k8s.io resources. +# This role is intended for users who need visibility into these resources +# without permissions to modify them. It is ideal for monitoring purposes and limited-access viewing. + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: cluster-api-provider-scaleway + app.kubernetes.io/managed-by: kustomize + name: scalewaymanagedmachinepool-viewer-role +rules: +- apiGroups: + - infrastructure.cluster.x-k8s.io + resources: + - scalewaymanagedmachinepools + verbs: + - get + - list + - watch +- apiGroups: + - infrastructure.cluster.x-k8s.io + resources: + - scalewaymanagedmachinepools/status + verbs: + - get diff --git a/config/samples/infrastructure_v1alpha1_scalewaymanagedcluster.yaml b/config/samples/infrastructure_v1alpha1_scalewaymanagedcluster.yaml new file mode 100644 index 0000000..238965c --- /dev/null +++ b/config/samples/infrastructure_v1alpha1_scalewaymanagedcluster.yaml @@ -0,0 +1,9 @@ +apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 +kind: ScalewayManagedCluster +metadata: + labels: + app.kubernetes.io/name: cluster-api-provider-scaleway + app.kubernetes.io/managed-by: kustomize + name: scalewaymanagedcluster-sample +spec: + # TODO(user): Add fields here diff --git a/config/samples/infrastructure_v1alpha1_scalewaymanagedcontrolplane.yaml b/config/samples/infrastructure_v1alpha1_scalewaymanagedcontrolplane.yaml new file mode 100644 index 0000000..d370844 --- /dev/null +++ b/config/samples/infrastructure_v1alpha1_scalewaymanagedcontrolplane.yaml @@ -0,0 +1,9 @@ +apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 +kind: ScalewayManagedControlPlane +metadata: + labels: + app.kubernetes.io/name: cluster-api-provider-scaleway + app.kubernetes.io/managed-by: kustomize + name: scalewaymanagedcontrolplane-sample +spec: + # TODO(user): Add fields here diff --git a/config/samples/infrastructure_v1alpha1_scalewaymanagedmachinepool.yaml b/config/samples/infrastructure_v1alpha1_scalewaymanagedmachinepool.yaml new file mode 100644 index 0000000..1b92481 --- /dev/null +++ b/config/samples/infrastructure_v1alpha1_scalewaymanagedmachinepool.yaml @@ -0,0 +1,9 @@ +apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 +kind: ScalewayManagedMachinePool +metadata: + labels: + app.kubernetes.io/name: cluster-api-provider-scaleway + app.kubernetes.io/managed-by: kustomize + name: scalewaymanagedmachinepool-sample +spec: + # TODO(user): Add fields here diff --git a/config/samples/kustomization.yaml b/config/samples/kustomization.yaml 
index 96ebf13..d8b61d5 100644 --- a/config/samples/kustomization.yaml +++ b/config/samples/kustomization.yaml @@ -4,4 +4,7 @@ resources: - infrastructure_v1alpha1_scalewaymachine.yaml - infrastructure_v1alpha1_scalewayclustertemplate.yaml - infrastructure_v1alpha1_scalewaymachinetemplate.yaml +- infrastructure_v1alpha1_scalewaymanagedcluster.yaml +- infrastructure_v1alpha1_scalewaymanagedcontrolplane.yaml +- infrastructure_v1alpha1_scalewaymanagedmachinepool.yaml # +kubebuilder:scaffold:manifestskustomizesamples diff --git a/docs/getting-started-managed.md b/docs/getting-started-managed.md new file mode 100644 index 0000000..3aadf2c --- /dev/null +++ b/docs/getting-started-managed.md @@ -0,0 +1,130 @@ +# Getting started (Kapsule / Kosmos) + +This document will help you provision a management cluster and a Scaleway managed workload cluster. + +## Setup a management cluster + +### Provision the cluster + +You can use any existing Kubernetes cluster as a management cluster. If you don't +have one, you can use one of the following methods to provision a cluster. At the +end of this section, you must have the kubeconfig of your future management cluster. + +#### Method 1: Create a Scaleway Kapsule cluster + +Follow this documentation to create a Scaleway Kapsule cluster: [Kubernetes - Quickstart](https://www.scaleway.com/en/docs/kubernetes/quickstart/) + +Make sure the `KUBECONFIG` environment variable points to the cluster's kubeconfig: + +```console +export KUBECONFIG=/path/to/your/kubeconfig +``` + +#### Method 2: Create a cluster in Docker with kind + +1. Follow this documentation to install Docker: [Install Docker Engine](https://docs.docker.com/engine/install/) +2. Follow this documentation to install kind: [Quick Start](https://kind.sigs.k8s.io/docs/user/quick-start/) +3. Create a kind cluster: + + ```console + $ kind create cluster + Creating cluster "kind" ... + ✓ Ensuring node image (kindest/node:v1.31.2) 🖼 + ✓ Preparing nodes 📦 + ✓ Writing configuration 📜 + ✓ Starting control-plane 🕹️ + ✓ Installing CNI 🔌 + ✓ Installing StorageClass 💾 + Set kubectl context to "kind-kind" + You can now use your cluster with: + + kubectl cluster-info --context kind-kind + + Have a question, bug, or feature request? Let us know! https://kind.sigs.k8s.io/#community 🙂 + ``` + +4. Get the kubeconfig: + + ```console + kind get kubeconfig > mgmt.yaml + export KUBECONFIG=mgmt.yaml + ``` + +### Install cluster API and the Scaleway provider + +1. Follow these instructions to install the `clusterctl` command-line tool: [Install clusterctl](https://cluster-api.sigs.k8s.io/user/quick-start#install-clusterctl) + +2. Initialize the management cluster: + + ```console + $ clusterctl init --infrastructure scaleway + Fetching providers + Installing cert-manager version="v1.17.2" + Waiting for cert-manager to be available... + Installing provider="cluster-api" version="v1.10.2" targetNamespace="capi-system" + Installing provider="bootstrap-kubeadm" version="v1.10.2" targetNamespace="capi-kubeadm-bootstrap-system" + Installing provider="control-plane-kubeadm" version="v1.10.2" targetNamespace="capi-kubeadm-control-plane-system" + Installing provider="infrastructure-scaleway" version="v0.1.0" targetNamespace="caps-system" + + Your management cluster has been initialized successfully! + + You can now create your first workload cluster by running the following: + + clusterctl generate cluster [name] --kubernetes-version [version] | kubectl apply -f - + ``` + +## Create a Scaleway managed workload cluster + +1. 
Replace the placeholder values and set the following environment variables: + + ```bash + export CLUSTER_NAME="my-cluster" + + # Scaleway credentials, project ID and region. + export SCW_ACCESS_KEY="" + export SCW_SECRET_KEY="" + export SCW_PROJECT_ID="" + export SCW_REGION="fr-par" + ``` + +2. Generate the cluster manifests (update the flags if needed): + + ```bash + clusterctl generate cluster ${CLUSTER_NAME} \ + --kubernetes-version v1.32.4 \ + --flavor managed \ + --worker-machine-count 1 > my-cluster.yaml + ``` + +3. Review and edit the `my-cluster.yaml` file as needed. + For configuring the CAPS CRDs, refer to the [ScalewayManagedCluster](scalewaymanagedcluster.md), + [ScalewayManagedControlPlane](scalewaymanagedcontrolplane.md) and + [ScalewayManagedMachinePool](scalewaymanagedmachinepool.md) documentation. +4. Apply the `my-cluster.yaml` file to create the workload cluster. +5. Wait for the cluster and machines to be ready. + + ```bash + $ clusterctl describe cluster ${CLUSTER_NAME} + NAME READY SEVERITY REASON SINCE MESSAGE + Cluster/my-cluster True 2m59s + ├─ClusterInfrastructure - ScalewayManagedCluster/my-cluster + ├─ControlPlane - ScalewayManagedControlPlane/my-cluster-control-plane + └─Workers + └─MachinePool/my-cluster-mp-0 True 2m + └─MachinePoolInfrastructure - ScalewayManagedMachinePool/my-cluster-mp-0 + ``` + +6. Fetch the kubeconfig of the cluster. + + ```bash + clusterctl get kubeconfig ${CLUSTER_NAME} > kubeconfig.yaml + export KUBECONFIG=kubeconfig.yaml + ``` + +7. List nodes. + + ```bash + $ kubectl get nodes + NAME STATUS ROLES AGE VERSION + scw-default-my-cluster-control-my-clust-c8e009 Ready 4m13s v1.32.4 + ``` diff --git a/docs/scalewaymanagedcluster.md b/docs/scalewaymanagedcluster.md new file mode 100644 index 0000000..d6220c3 --- /dev/null +++ b/docs/scalewaymanagedcluster.md @@ -0,0 +1,106 @@ +# ScalewayManagedCluster + +The `ScalewayManagedCluster` resource provisions the necessary Scaleway infrastructure +to make the Scaleway managed workload cluster work. This may include [Private Networks](https://www.scaleway.com/en/vpc/), +[Public Gateways](https://www.scaleway.com/en/public-gateway/), and more, depending on the configuration of the `ScalewayManagedCluster`. + +This document describes the various configuration options you can set to enable or disable +important features on a `ScalewayManagedCluster`. + +## Minimal ScalewayManagedCluster + +The `ScalewayManagedCluster` with the minimum options looks like this: + +```yaml +apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 +kind: ScalewayManagedCluster +metadata: + name: my-cluster + namespace: default +spec: + projectID: 11111111-1111-1111-1111-111111111111 + region: fr-par + scalewaySecretName: my-scaleway-secret +``` + +The `projectID`, `region` and `scalewaySecretName` fields are **required**. + +The `projectID` and `region` fields are **immutable**: they cannot be updated after creation. + +The `scalewaySecretName` field must contain the name of an existing `Secret` inside the +namespace of the `ScalewayManagedCluster`. For more information about this secret, please refer +to the [Scaleway Secret documentation](secret.md). + +## VPC + +### Private Network + +If the `ScalewayManagedCluster` is associated with a Kapsule `ScalewayManagedControlPlane`, +a new Private Network is automatically created if none is provided. + +It is possible to re-use an existing Private Network or configure the VPC where the +Private Network will be created.
+ +```yaml +apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 +kind: ScalewayManagedCluster +metadata: + name: my-cluster + namespace: default +spec: + network: + privateNetwork: + # id: 11111111-1111-1111-1111-111111111111 + # vpcID: 11111111-1111-1111-1111-111111111111 + # subnet: 192.168.0.0/22 + # some fields were omitted... +``` + +- The `id` field can be set to use an existing Private Network. If not set, the provider + will create a new Private Network and manage it. +- The `vpcID` field can be set to tell the provider to create a new Private Network inside a + specific VPC. If not set, Private Networks are created in the default VPC. +- The `subnet` field can be set to use a specific subnet. Make sure the subnet does not + overlap with the subnet of another Private Network in the VPC. + +### Public Gateways + +To create `ScalewayManagedMachinePools` without a Public IP, your Private Network must contain +at least one Public Gateway that advertises its default route. You can configure one +manually or let the provider configure that for you: + +```yaml +apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 +kind: ScalewayManagedCluster +metadata: + name: my-cluster + namespace: default +spec: + region: fr-par + network: + publicGateways: + - type: VPC-GW-S + zone: fr-par-1 + - type: VPC-GW-S + zone: fr-par-2 + # ip: 42.42.42.42 + # Note: the Public Gateway product is currently not available in fr-par-3. + # some fields were omitted... +``` + +The `ip` field can be set on the spec of a Public Gateway to use an existing Public IP. +If not set, a new IP will be created. + +> [!CAUTION] +> The `publicGateways` field is fully mutable, but changes should be avoided as much as possible. +> +> 🚫📶 Updating existing Public Gateways can lead to a loss of network on the nodes, be +> very careful when updating this field. +> +> 🚮 Updating a Public Gateway will lead to its re-creation, which will make its private IP change. +> The only change that won't lead to a re-creation of the Public Gateway is a type upgrade +> (e.g. VPC-GW-S to VPC-GW-M). Downgrading a Public Gateway is only possible through a re-creation. +> +> ⏳ Because the default routes are advertised via DHCP, the DHCP leases of the nodes must +> be renewed for changes to be propagated (~24 hours). You can reboot the nodes or +> re-create a new `MachinePool` to force the propagation. diff --git a/docs/scalewaymanagedcontrolplane.md b/docs/scalewaymanagedcontrolplane.md new file mode 100644 index 0000000..9f07d41 --- /dev/null +++ b/docs/scalewaymanagedcontrolplane.md @@ -0,0 +1,212 @@ +# ScalewayManagedControlPlane + +The `ScalewayManagedControlPlane` resource provisions a Scaleway Managed Kubernetes cluster +using [Kapsule](https://www.scaleway.com/en/kubernetes-kapsule/) or [Kosmos](https://www.scaleway.com/fr/kubernetes-kosmos/). + +This document describes the various configuration options you can set to configure a `ScalewayManagedControlPlane`. + +## Minimal ScalewayManagedControlPlane + +The `ScalewayManagedControlPlane` with the minimum options looks like this: + +```yaml +apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 +kind: ScalewayManagedControlPlane +metadata: + name: my-cluster-control-plane + namespace: default +spec: + type: kapsule + version: v1.32.0 +``` + +The `type` field must be set to the desired cluster type (e.g. `kapsule`, `kapsule-dedicated-4`, `multicloud`, etc.). +You can list the available cluster types using the Scaleway CLI: `$ scw k8s cluster-type list`. 
+The cluster is automatically updated to the desired type when the `type` field is updated. + +The `version` field must be set to one of the supported Kubernetes version. +You can list the supported Kubernetes versions using the Scaleway CLI: `$ scw k8s version list`. +The cluster is automatically upgraded when the `version` field is bumped to a +version that is above the current version of the cluster. It is not possible to +downgrade the version of a cluster. + +## Additional tags + +You can configure additional tags that will be set on the created Scaleway Managed Kubernetes cluster: + +```yaml +apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 +kind: ScalewayManagedControlPlane +metadata: + name: my-cluster-control-plane + namespace: default +spec: + # some fields were omitted... + additionalTags: + - "test" + - "test1" +``` + +> [!WARNING] +> Do not attempt to update the tags directly via the Scaleway API as tags will always +> be overwritten by the provider during ScalewayManagedControlPlane reconciliation. + +## ACL + +You can configure the IPs allowed to access the public endpoint of the cluster: + +```yaml +apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 +kind: ScalewayManagedControlPlane +metadata: + name: my-cluster-control-plane + namespace: default +spec: + # some fields were omitted... + acl: + allowedRanges: + - "10.10.10.0/24" + - "20.20.0.0/16" +``` + +If the `acl` field is not set, the provider will ensure that the ACL rule with +IP range `0.0.0.0/0` is set on the cluster. + +> [!WARNING] +> Make sure the nodes of the management cluster are allowed to access the cluster. + +## Autoscaler configuration + +You can configure the autoscaler of the cluster: + +```yaml +apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 +kind: ScalewayManagedControlPlane +metadata: + name: my-cluster-control-plane + namespace: default +spec: + # some fields were omitted... + autoscaler: + scaleDownDisabled: false + scaleDownDelayAfterAdd: 10m + expander: most_pods + ignoreDaemonsetsUtilization: false + balanceSimilarNodeGroups: false + scaleDownUtilizationThreshold: "0.5" + maxGracefulTerminationSec: 600 +``` + +## Auto Upgrade configuration + +You can set the auto upgrade configuration of the cluster: + +```yaml +apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 +kind: ScalewayManagedControlPlane +metadata: + name: my-cluster-control-plane + namespace: default +spec: + # some fields were omitted... + autoUpgrade: + enabled: true + maintenanceWindow: + startHour: 0 + day: any +``` + +## Feature Gates + +You can enable Kubernetes feature gates on the cluster: + +```yaml +apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 +kind: ScalewayManagedControlPlane +metadata: + name: my-cluster-control-plane + namespace: default +spec: + # some fields were omitted... + featureGates: + - "HPAScaleToZero" + - "PodLevelResources" +``` + +You can use the Scaleway CLI to list the available feature gates: `$ scw k8s version list -o json | jq`. + +## Admission Plugins + +You can enable Kubernetes admission plugins on the cluster: + +```yaml +apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 +kind: ScalewayManagedControlPlane +metadata: + name: my-cluster-control-plane + namespace: default +spec: + # some fields were omitted... + admissionPlugins: + - "AlwaysPullImages" + - "PodNodeSelector" +``` + +You can use the Scaleway CLI to list the available admission plugins: `$ scw k8s version list -o json | jq`. 
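+
+As a rough illustration, here is one way to filter that JSON output with `jq`.
+This is a sketch only: the `available_feature_gates` and `available_admission_plugins`
+field names are assumptions about the CLI's JSON output, so inspect the raw JSON first if unsure.
+
+```bash
+# List, per Kubernetes version, the feature gates and admission plugins it supports.
+# The field names below are assumed, not verified against every CLI version.
+scw k8s version list -o json \
+  | jq '.[] | {name, available_feature_gates, available_admission_plugins}'
+```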
+
+## API Server Cert SANs
+
+You can add additional API Server Cert SANs:
+
+```yaml
+apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1
+kind: ScalewayManagedControlPlane
+metadata:
+  name: my-cluster-control-plane
+  namespace: default
+spec:
+  # some fields were omitted...
+  apiServerCertSANs:
+    - "mycluster.com"
+```
+
+## OpenID Connect configuration
+
+You can set the OIDC configuration of the cluster:
+
+```yaml
+apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1
+kind: ScalewayManagedControlPlane
+metadata:
+  name: my-cluster-control-plane
+  namespace: default
+spec:
+  # some fields were omitted...
+  openIDConnect:
+    issuerURL: "https://oidc-provider.example.com"
+    clientID: "test"
+    usernameClaim: "email"
+    usernamePrefix: "myusernameprefix"
+    groupsClaim:
+      - "groups"
+    groupsPrefix: "mygroupprefix"
+    requiredClaim:
+      - "yourkey=yourvalue"
+```
+
+## Cluster deletion behavior
+
+You can enable the deletion of additional resources (e.g. Load Balancers, Persistent Volumes, etc.)
+when the cluster is deleted:
+
+```yaml
+apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1
+kind: ScalewayManagedControlPlane
+metadata:
+  name: my-cluster-control-plane
+  namespace: default
+spec:
+  # some fields were omitted...
+  onDelete:
+    withAdditionalResources: true
+```
diff --git a/docs/scalewaymanagedmachinepool.md b/docs/scalewaymanagedmachinepool.md
new file mode 100644
index 0000000..9201525
--- /dev/null
+++ b/docs/scalewaymanagedmachinepool.md
@@ -0,0 +1,161 @@
+# ScalewayManagedMachinePool
+
+The `ScalewayManagedMachinePool` resource provisions a pool in a Scaleway Managed Kubernetes cluster.
+
+This document describes the various configuration options you can set to configure a `ScalewayManagedMachinePool`.
+
+## Minimal ScalewayManagedMachinePool
+
+The `ScalewayManagedMachinePool` with the minimum options looks like this:
+
+```yaml
+apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1
+kind: ScalewayManagedMachinePool
+metadata:
+  name: my-cluster-managed-machine-pool
+  namespace: default
+spec:
+  nodeType: GP1-XS
+  zone: fr-par-1
+```
+
+## Additional tags
+
+You can configure additional tags that will be set on the created pool:
+
+```yaml
+apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1
+kind: ScalewayManagedMachinePool
+metadata:
+  name: my-cluster-managed-machine-pool
+  namespace: default
+spec:
+  # some fields were omitted...
+  additionalTags:
+    - "test"
+    - "test1"
+```
+
+> [!WARNING]
+> Do not attempt to update the tags directly via the Scaleway API as tags will always
+> be overwritten by the provider during ScalewayManagedMachinePool reconciliation.
+
+## Autohealing
+
+You can enable autohealing in the pool:
+
+```yaml
+apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1
+kind: ScalewayManagedMachinePool
+metadata:
+  name: my-cluster-managed-machine-pool
+  namespace: default
+spec:
+  # some fields were omitted...
+  autohealing: true
+```
+
+## Security Group and Placement Group
+
+You can specify a Security Group ID and a Placement Group ID during the creation of the pool:
+
+```yaml
+apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1
+kind: ScalewayManagedMachinePool
+metadata:
+  name: my-cluster-managed-machine-pool
+  namespace: default
+spec:
+  # some fields were omitted...
+  placementGroupID: 11111111-1111-1111-1111-111111111111
+  securityGroupID: 11111111-1111-1111-1111-111111111111
+```
+
+## Autoscaling configuration
+
+You can enable autoscaling on the pool and set the min/max size of the pool:
+
+```yaml
+apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1
+kind: ScalewayManagedMachinePool
+metadata:
+  name: my-cluster-managed-machine-pool
+  namespace: default
+spec:
+  # some fields were omitted...
+  scaling:
+    autoscaling: true
+    minSize: 0
+    maxSize: 5
+```
+
+## Upgrade policy
+
+You can set the upgrade policy of the pool:
+
+```yaml
+apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1
+kind: ScalewayManagedMachinePool
+metadata:
+  name: my-cluster-managed-machine-pool
+  namespace: default
+spec:
+  # some fields were omitted...
+  upgradePolicy:
+    maxUnavailable: 0
+    maxSurge: 2
+```
+
+## Kubelet args
+
+You can set Kubelet args on the pool:
+
+```yaml
+apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1
+kind: ScalewayManagedMachinePool
+metadata:
+  name: my-cluster-managed-machine-pool
+  namespace: default
+spec:
+  # some fields were omitted...
+  kubeletArgs:
+    containerLogMaxFiles: "10"
+    registryPullQPS: "10"
+```
+
+You can use the Scaleway CLI to list the available kubelet args: `$ scw k8s version list -o json | jq`.
+
+## Root volume configuration
+
+You can configure the root volume of the nodes of the pool:
+
+```yaml
+apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1
+kind: ScalewayManagedMachinePool
+metadata:
+  name: my-cluster-managed-machine-pool
+  namespace: default
+spec:
+  # some fields were omitted...
+  rootVolumeSizeGB: 40
+  rootVolumeType: "sbs_15k"
+```
+
+## Full isolation pool
+
+You can disable adding a public IP on the nodes of the pool:
+
+```yaml
+apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1
+kind: ScalewayManagedMachinePool
+metadata:
+  name: my-cluster-managed-machine-pool
+  namespace: default
+spec:
+  # some fields were omitted...
+  publicIPDisabled: true
+```
+
+Setting `publicIPDisabled: true` is only possible with a Kapsule cluster.
+The Private Network of the cluster must also have at least one Public Gateway that
+advertises a default route.
diff --git a/docs/secret.md b/docs/secret.md
index ad8ded8..71e0e33 100644
--- a/docs/secret.md
+++ b/docs/secret.md
@@ -31,6 +31,7 @@ Your Scaleway API Key must have the following permission sets:
 - `BlockStorageFullAccess`
 - `DomainsDNSFullAccess`
 - `InstancesFullAccess`
+- `KubernetesFullAccess`
 
 If a permission set is missing, you may encounter reconcile errors in the logs of the provider.
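+
+As a rough illustration, a secret with these keys can be created with `kubectl`.
+This is a sketch only: the secret name `my-scaleway-secret`, the `default` namespace
+and the placeholder credentials are examples and must match what your cluster
+resources reference through `scalewaySecretName`.
+
+```bash
+# Create the Scaleway credentials secret in the namespace of the cluster resources.
+# Replace the placeholder values with a valid API key.
+kubectl create secret generic my-scaleway-secret \
+  --namespace default \
+  --from-literal=SCW_ACCESS_KEY="SCWXXXXXXXXXXXXXXXXX" \
+  --from-literal=SCW_SECRET_KEY="11111111-1111-1111-1111-111111111111"
+```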
diff --git a/go.mod b/go.mod index cd77632..0227ba0 100644 --- a/go.mod +++ b/go.mod @@ -3,10 +3,12 @@ module github.com/scaleway/cluster-api-provider-scaleway go 1.24.0 require ( + github.com/Masterminds/semver/v3 v3.3.0 github.com/onsi/ginkgo/v2 v2.23.4 github.com/onsi/gomega v1.37.0 - github.com/scaleway/scaleway-sdk-go v1.0.0-beta.33 + github.com/scaleway/scaleway-sdk-go v1.0.0-beta.34.0.20250808131040-125b97d90073 go.uber.org/mock v0.5.2 + golang.org/x/crypto v0.40.0 k8s.io/api v0.32.6 k8s.io/apimachinery v0.32.6 k8s.io/client-go v0.32.6 @@ -24,7 +26,6 @@ require ( github.com/BurntSushi/toml v1.4.0 // indirect github.com/MakeNowJust/heredoc v1.0.0 // indirect github.com/Masterminds/goutils v1.1.1 // indirect - github.com/Masterminds/semver/v3 v3.3.0 // indirect github.com/Masterminds/sprig/v3 v3.3.0 // indirect github.com/Microsoft/go-winio v0.5.0 // indirect github.com/ProtonMail/go-crypto v0.0.0-20230217124315-7d5c6f04bbb8 // indirect @@ -119,16 +120,15 @@ require ( go.uber.org/automaxprocs v1.6.0 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect - golang.org/x/crypto v0.36.0 // indirect golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect - golang.org/x/net v0.38.0 // indirect + golang.org/x/net v0.42.0 // indirect golang.org/x/oauth2 v0.28.0 // indirect - golang.org/x/sync v0.12.0 // indirect - golang.org/x/sys v0.32.0 // indirect - golang.org/x/term v0.30.0 // indirect - golang.org/x/text v0.23.0 // indirect + golang.org/x/sync v0.16.0 // indirect + golang.org/x/sys v0.34.0 // indirect + golang.org/x/term v0.33.0 // indirect + golang.org/x/text v0.28.0 // indirect golang.org/x/time v0.9.0 // indirect - golang.org/x/tools v0.31.0 // indirect + golang.org/x/tools v0.35.0 // indirect gomodules.xyz/jsonpatch/v2 v2.5.0 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20241223144023-3abc09e42ca8 // indirect diff --git a/go.sum b/go.sum index 4c85ded..8097a27 100644 --- a/go.sum +++ b/go.sum @@ -223,8 +223,8 @@ github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWN github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/sagikazarmark/locafero v0.7.0 h1:5MqpDsTGNDhY8sGp0Aowyf0qKsPrhewaLSsFaodPcyo= github.com/sagikazarmark/locafero v0.7.0/go.mod h1:2za3Cg5rMaTMoG/2Ulr9AwtFaIppKXTRYnozin4aB5k= -github.com/scaleway/scaleway-sdk-go v1.0.0-beta.33 h1:KhF0WejiUTDbL5X55nXowP7zNopwpowa6qaMAWyIE+0= -github.com/scaleway/scaleway-sdk-go v1.0.0-beta.33/go.mod h1:792k1RTU+5JeMXm35/e2Wgp71qPH/DmDoZrRc+EFZDk= +github.com/scaleway/scaleway-sdk-go v1.0.0-beta.34.0.20250808131040-125b97d90073 h1:4muj/o8l5ONVGzmzaGgQtniCG9Zvnn3Teek28wcOMpg= +github.com/scaleway/scaleway-sdk-go v1.0.0-beta.34.0.20250808131040-125b97d90073/go.mod h1:2Cfo14o/ZO3hZg9GjbyD/BHKbyri3K5BiEHq4fBcUHY= github.com/shopspring/decimal v1.4.0 h1:bxl37RwXBklmTi0C79JfXCEBD1cqqHt0bbgBAGFp81k= github.com/shopspring/decimal v1.4.0/go.mod h1:gawqmDU56v4yIKSwfBSFip1HdCCXN8/+DMd9qYNcwME= github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= @@ -304,8 +304,8 @@ golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACk golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto 
v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34= -golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc= +golang.org/x/crypto v0.40.0 h1:r4x+VvoG5Fm+eJcxMaY8CQM7Lb0l1lsmjGBQ6s8BfKM= +golang.org/x/crypto v0.40.0/go.mod h1:Qr1vMER5WyS2dfPHAlsOj01wgLbsyWtFn/aY+5+ZdxY= golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0LeHDbnYEryqj5Q1ug8= golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= @@ -315,15 +315,15 @@ golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.38.0 h1:vRMAPTMaeGqVhG5QyLJHqNDwecKTomGeqbnfZyKlBI8= -golang.org/x/net v0.38.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8= +golang.org/x/net v0.42.0 h1:jzkYrhi3YQWD6MLBJcsklgQsoAcw89EcZbJw8Z614hs= +golang.org/x/net v0.42.0/go.mod h1:FF1RA5d3u7nAYA4z2TkclSCKh68eSXtiFwcWQpPXdt8= golang.org/x/oauth2 v0.28.0 h1:CrgCKl8PPAVtLnU3c+EDw6x11699EWlsDeWNWKdIOkc= golang.org/x/oauth2 v0.28.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw= -golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= +golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw= +golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -334,23 +334,23 @@ golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.32.0 h1:s77OFDvIQeibCmezSnk/q6iAfkdiQaJi4VzroCFrN20= -golang.org/x/sys v0.32.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/sys v0.34.0 h1:H5Y5sJ2L2JRdyv7ROF1he/lPdvFsd0mJHFw2ThKHxLA= +golang.org/x/sys v0.34.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.30.0 h1:PQ39fJZ+mfadBm0y5WlL4vlM7Sx1Hgf13sMIY2+QS9Y= -golang.org/x/term v0.30.0/go.mod h1:NYYFdzHoI5wRh/h5tDMdMqCqPJZEuNqVR5xJLd/n67g= +golang.org/x/term v0.33.0 
h1:NuFncQrRcaRvVmgRkvM3j/F00gWIAlcmlB8ACEKmGIg= +golang.org/x/term v0.33.0/go.mod h1:s18+ql9tYWp1IfpV9DmCtQDDSRBUjKaw9M1eAv5UeF0= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY= -golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4= +golang.org/x/text v0.28.0 h1:rhazDwis8INMIwQ4tpjLDzUhx6RlXqZNPEM0huQojng= +golang.org/x/text v0.28.0/go.mod h1:U8nCwOR8jO/marOQ0QbDiOngZVEBB7MAiitBuMjXiNU= golang.org/x/time v0.9.0 h1:EsRrnYcQiGH+5FfbgvV4AP7qEZstoyrHB0DzarOQ4ZY= golang.org/x/time v0.9.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.31.0 h1:0EedkvKDbh+qistFTd0Bcwe/YLh4vHwWEkiI0toFIBU= -golang.org/x/tools v0.31.0/go.mod h1:naFTU+Cev749tSJRXJlna0T3WxKvb1kWEx15xA4SdmQ= +golang.org/x/tools v0.35.0 h1:mBffYraMEf7aa0sB+NuKnuCy8qI/9Bughn8dC2Gu5r0= +golang.org/x/tools v0.35.0/go.mod h1:NKdj5HkL/73byiZSJjqJgKn3ep7KjFkBOkR/Hps3VPw= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/internal/controller/controller.go b/internal/controller/controller.go new file mode 100644 index 0000000..5ac8adb --- /dev/null +++ b/internal/controller/controller.go @@ -0,0 +1,5 @@ +package controller + +import "time" + +const DefaultRetryTime = 30 * time.Second diff --git a/internal/controller/helpers.go b/internal/controller/helpers.go new file mode 100644 index 0000000..f9f9ec3 --- /dev/null +++ b/internal/controller/helpers.go @@ -0,0 +1,102 @@ +package controller + +import ( + "context" + "fmt" + "slices" + + infrav1 "github.com/scaleway/cluster-api-provider-scaleway/api/v1alpha1" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "sigs.k8s.io/cluster-api/util" + "sigs.k8s.io/cluster-api/util/patch" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/apiutil" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" +) + +var ( + scalewaySecretOwnerAPIVersion = infrav1.GroupVersion.String() + scalewaySecretOwnerKinds = []string{"ScalewayCluster", "ScalewayManagedCluster"} +) + +// claimScalewaySecret adds an object as owner of a secret. It also adds a finalizer +// (if not present already) to prevent the removal of the secret. 
+func claimScalewaySecret(ctx context.Context, c client.Client, owner client.Object, secretName string) error {
+	gvk, err := apiutil.GVKForObject(owner, c.Scheme())
+	if err != nil {
+		return fmt.Errorf("failed to get GVK for owner: %w", err)
+	}
+
+	if !slices.Contains(scalewaySecretOwnerKinds, gvk.Kind) {
+		return fmt.Errorf("object with kind %s cannot own scaleway secret", gvk.Kind)
+	}
+
+	secret := &corev1.Secret{}
+	if err := c.Get(ctx, client.ObjectKey{
+		Name:      secretName,
+		Namespace: owner.GetNamespace(),
+	}, secret); err != nil {
+		return err
+	}
+
+	secretHelper, err := patch.NewHelper(secret, c)
+	if err != nil {
+		return fmt.Errorf("failed to create patch helper for secret: %w", err)
+	}
+
+	controllerutil.AddFinalizer(secret, SecretFinalizer)
+
+	if err := controllerutil.SetOwnerReference(owner, secret, c.Scheme()); err != nil {
+		return fmt.Errorf("failed to set owner reference for secret %s: %w", secret.Name, err)
+	}
+
+	return secretHelper.Patch(ctx, secret)
+}
+
+// releaseScalewaySecret removes an object as owner of a secret. It also removes
+// the finalizer if there is no owner anymore.
+func releaseScalewaySecret(ctx context.Context, c client.Client, owner client.Object, secretName string) error {
+	gvk, err := apiutil.GVKForObject(owner, c.Scheme())
+	if err != nil {
+		return fmt.Errorf("failed to get GVK for owner: %w", err)
+	}
+
+	if !slices.Contains(scalewaySecretOwnerKinds, gvk.Kind) {
+		return fmt.Errorf("object with kind %s cannot own scaleway secret", gvk.Kind)
+	}
+
+	secret := &corev1.Secret{}
+	if err := c.Get(ctx, client.ObjectKey{
+		Name:      secretName,
+		Namespace: owner.GetNamespace(),
+	}, secret); err != nil {
+		if apierrors.IsNotFound(err) {
+			return nil
+		}
+
+		return err
+	}
+
+	secretHelper, err := patch.NewHelper(secret, c)
+	if err != nil {
+		return fmt.Errorf("failed to create patch helper for secret: %w", err)
+	}
+
+	hasOwnerReference, err := controllerutil.HasOwnerReference(secret.OwnerReferences, owner, c.Scheme())
+	if err != nil {
+		return fmt.Errorf("failed to check owner reference for secret %s: %w", secret.Name, err)
+	}
+
+	if hasOwnerReference {
+		if err := controllerutil.RemoveOwnerReference(owner, secret, c.Scheme()); err != nil {
+			return fmt.Errorf("failed to remove owner reference for secret %s: %w", secret.Name, err)
+		}
+	}
+
+	if !util.HasOwner(secret.OwnerReferences, scalewaySecretOwnerAPIVersion, scalewaySecretOwnerKinds) {
+		controllerutil.RemoveFinalizer(secret, SecretFinalizer)
+	}
+
+	return secretHelper.Patch(ctx, secret)
+}
diff --git a/internal/controller/scalewaycluster_controller.go b/internal/controller/scalewaycluster_controller.go
index dea8828..5dbe17e 100644
--- a/internal/controller/scalewaycluster_controller.go
+++ b/internal/controller/scalewaycluster_controller.go
@@ -5,17 +5,14 @@ import (
 	"errors"
 	"fmt"
 
-	corev1 "k8s.io/api/core/v1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
 	"sigs.k8s.io/cluster-api/util"
 	"sigs.k8s.io/cluster-api/util/annotations"
-	"sigs.k8s.io/cluster-api/util/patch"
 	"sigs.k8s.io/cluster-api/util/predicates"
 	ctrl "sigs.k8s.io/controller-runtime"
 	"sigs.k8s.io/controller-runtime/pkg/builder"
 	"sigs.k8s.io/controller-runtime/pkg/client"
-	"sigs.k8s.io/controller-runtime/pkg/client/apiutil"
 	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
 	"sigs.k8s.io/controller-runtime/pkg/handler"
 	logf "sigs.k8s.io/controller-runtime/pkg/log"
@@ -98,7 +95,7 @@ func (r *ScalewayClusterReconciler) Reconcile(ctx context.Context, req ctrl.Requ
return ctrl.Result{}, nil } - if err := r.claimScalewaySecret(ctx, scalewayCluster); err != nil { + if err := claimScalewaySecret(ctx, r, scalewayCluster, scalewayCluster.Spec.ScalewaySecretName); err != nil { return ctrl.Result{}, fmt.Errorf("unable to claim ScalewaySecret: %w", err) } @@ -132,7 +129,7 @@ func (r *ScalewayClusterReconciler) reconcileDelete(ctx context.Context, cluster // Cluster is deleted so remove the finalizer. controllerutil.RemoveFinalizer(scalewayCluster, infrav1.ClusterFinalizer) - if err := r.releaseScalewaySecret(ctx, scalewayCluster); err != nil { + if err := releaseScalewaySecret(ctx, r, scalewayCluster, scalewayCluster.Spec.ScalewaySecretName); err != nil { return ctrl.Result{}, err } @@ -201,67 +198,3 @@ func (r *ScalewayClusterReconciler) SetupWithManager(ctx context.Context, mgr ct Named("scalewaycluster"). Complete(r) } - -func (r *ScalewayClusterReconciler) claimScalewaySecret(ctx context.Context, scalewayCluster *infrav1.ScalewayCluster) error { - secret := &corev1.Secret{} - if err := r.Client.Get(ctx, client.ObjectKey{ - Name: scalewayCluster.Spec.ScalewaySecretName, - Namespace: scalewayCluster.Namespace, - }, secret); err != nil { - return err - } - - secretHelper, err := patch.NewHelper(secret, r.Client) - if err != nil { - return fmt.Errorf("failed to create patch helper for secret: %w", err) - } - - controllerutil.AddFinalizer(secret, SecretFinalizer) - - if err := controllerutil.SetOwnerReference(scalewayCluster, secret, r.Client.Scheme()); err != nil { - return fmt.Errorf("failed to set owner reference for secret %s: %w", secret.Name, err) - } - - return secretHelper.Patch(ctx, secret) -} - -func (r *ScalewayClusterReconciler) releaseScalewaySecret(ctx context.Context, scalewayCluster *infrav1.ScalewayCluster) error { - secret := &corev1.Secret{} - if err := r.Client.Get(ctx, client.ObjectKey{ - Name: scalewayCluster.Spec.ScalewaySecretName, - Namespace: scalewayCluster.Namespace, - }, secret); err != nil { - return err - } - - secretHelper, err := patch.NewHelper(secret, r.Client) - if err != nil { - if apierrors.IsNotFound(err) { - return nil - } - - return fmt.Errorf("failed to create patch helper for secret: %w", err) - } - - hasOwnerReference, err := controllerutil.HasOwnerReference(secret.OwnerReferences, scalewayCluster, r.Scheme()) - if err != nil { - return fmt.Errorf("failed to check owner refenrece for secret %s: %w", secret.Name, err) - } - - if hasOwnerReference { - if err := controllerutil.RemoveOwnerReference(scalewayCluster, secret, r.Client.Scheme()); err != nil { - return fmt.Errorf("failed to remove owner reference for secret %s: %w", secret.Name, err) - } - } - - gvk, err := apiutil.GVKForObject(scalewayCluster, r.Scheme()) - if err != nil { - return fmt.Errorf("failed to get GVK for ScalewayCluster: %w", err) - } - - if !util.HasOwner(secret.OwnerReferences, gvk.GroupVersion().String(), []string{gvk.Kind}) { - controllerutil.RemoveFinalizer(secret, SecretFinalizer) - } - - return secretHelper.Patch(ctx, secret) -} diff --git a/internal/controller/scalewaycluster_controller_test.go b/internal/controller/scalewaycluster_controller_test.go index ee63f94..a47e8a0 100644 --- a/internal/controller/scalewaycluster_controller_test.go +++ b/internal/controller/scalewaycluster_controller_test.go @@ -204,7 +204,7 @@ var _ = Describe("ScalewayCluster", func() { err := k8sClient.Get(ctx, typeNamespacedName, resource) Expect(err).NotTo(HaveOccurred()) - resource.Spec.ControlPlaneEndpoint.Host = "12.12.12.12" + 
resource.Spec.ControlPlaneEndpoint.Host = "11.11.11.11" Expect(k8sClient.Update(ctx, resource)).NotTo(Succeed()) }) diff --git a/internal/controller/scalewaymanagedcluster_controller.go b/internal/controller/scalewaymanagedcluster_controller.go new file mode 100644 index 0000000..cfa57ae --- /dev/null +++ b/internal/controller/scalewaymanagedcluster_controller.go @@ -0,0 +1,280 @@ +package controller + +import ( + "context" + "errors" + "fmt" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/types" + clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + "sigs.k8s.io/cluster-api/util" + "sigs.k8s.io/cluster-api/util/annotations" + "sigs.k8s.io/cluster-api/util/predicates" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/builder" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/handler" + logf "sigs.k8s.io/controller-runtime/pkg/log" + + infrav1 "github.com/scaleway/cluster-api-provider-scaleway/api/v1alpha1" + "github.com/scaleway/cluster-api-provider-scaleway/internal/scope" + "github.com/scaleway/cluster-api-provider-scaleway/internal/service/scaleway" +) + +// ScalewayManagedClusterReconciler reconciles a ScalewayManagedCluster object +type ScalewayManagedClusterReconciler struct { + client.Client + + createScalewayManagedClusterService scalewayManagedClusterServiceCreator +} + +// scalewayManagedClusterServiceCreator is a function that creates a new scalewayManagedClusterService reconciler. +type scalewayManagedClusterServiceCreator func(*scope.ManagedCluster) *scalewayManagedClusterService + +func NewScalewayManagedClusterReconciler(c client.Client) *ScalewayManagedClusterReconciler { + return &ScalewayManagedClusterReconciler{ + Client: c, + createScalewayManagedClusterService: newScalewayManagedClusterService, + } +} + +// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=scalewaymanagedclusters,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=scalewaymanagedclusters/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=scalewaymanagedclusters/finalizers,verbs=update +// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=scalewaymanagedcontrolplanes,verbs=get;list;watch +// +kubebuilder:rbac:groups=cluster.x-k8s.io,resources=clusters;clusters/status,verbs=get;list;watch +// +kubebuilder:rbac:groups="",resources=secrets,verbs=get;list;watch;update;patch + +// Reconcile is part of the main kubernetes reconciliation loop which aims to +// move the current state of the cluster closer to the desired state. +func (r *ScalewayManagedClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request) (_ ctrl.Result, retErr error) { + log := logf.FromContext(ctx) + + managedCluster := &infrav1.ScalewayManagedCluster{} + if err := r.Get(ctx, req.NamespacedName, managedCluster); err != nil { + if apierrors.IsNotFound(err) { + return ctrl.Result{}, nil + } + return ctrl.Result{}, err + } + + // Fetch the Cluster. + cluster, err := util.GetOwnerCluster(ctx, r.Client, managedCluster.ObjectMeta) + if err != nil { + return ctrl.Result{}, err + } + if cluster == nil { + log.Info("Cluster Controller has not yet set OwnerRef") + return ctrl.Result{}, nil + } + + if annotations.IsPaused(cluster, managedCluster) { + log.Info("ScalewayManagedCluster or linked Cluster is marked as paused. 
Won't reconcile") + return ctrl.Result{}, nil + } + + log = log.WithValues("cluster", cluster.Name) + + if cluster.Spec.ControlPlaneRef == nil { + return ctrl.Result{}, errors.New("missing controlPlaneRef in cluster spec") + } + controlPlane := &infrav1.ScalewayManagedControlPlane{} + controlPlaneRef := types.NamespacedName{ + Name: cluster.Spec.ControlPlaneRef.Name, + Namespace: cluster.Namespace, + } + + if err := r.Get(ctx, controlPlaneRef, controlPlane); err != nil { + if !apierrors.IsNotFound(err) || managedCluster.DeletionTimestamp.IsZero() { + return ctrl.Result{}, fmt.Errorf("failed to get control plane ref: %w", err) + } + controlPlane = nil + } + + log = log.WithValues("controlPlane", controlPlaneRef.Name) + ctx = logf.IntoContext(ctx, log) + + managedClusterScope, err := scope.NewManagedCluster(ctx, &scope.ManagedClusterParams{ + Client: r.Client, + ManagedCluster: managedCluster, + ManagedControlPlane: controlPlane, + }) + if err != nil { + return ctrl.Result{}, err + } + + defer func() { + if err := managedClusterScope.Close(ctx); err != nil && retErr == nil { + retErr = err + } + }() + + if err := claimScalewaySecret(ctx, r, managedCluster, managedCluster.Spec.ScalewaySecretName); err != nil { + return ctrl.Result{}, fmt.Errorf("unable to claim ScalewaySecret: %w", err) + } + + if !managedCluster.DeletionTimestamp.IsZero() { + return r.reconcileDelete(ctx, managedClusterScope) + } + + return r.reconcileNormal(ctx, managedClusterScope) +} + +func (r *ScalewayManagedClusterReconciler) reconcileNormal(ctx context.Context, s *scope.ManagedCluster) (ctrl.Result, error) { + log := logf.FromContext(ctx) + + log.Info("Reconciling ScalewayManagedCluster") + managedCluster := s.ManagedCluster + + // Register our finalizer immediately to avoid orphaning Scaleway resources on delete + if controllerutil.AddFinalizer(managedCluster, infrav1.ManagedClusterFinalizer) { + if err := s.PatchObject(ctx); err != nil { + return ctrl.Result{}, err + } + } + + if err := r.createScalewayManagedClusterService(s).Reconcile(ctx); err != nil { + // Handle terminal & transient errors + var reconcileError *scaleway.ReconcileError + if errors.As(err, &reconcileError) { + if reconcileError.IsTerminal() { + log.Error(err, "Failed to reconcile ScalewayManagedCluster") + return ctrl.Result{}, nil + } else if reconcileError.IsTransient() { + log.Info(fmt.Sprintf("Transient failure to reconcile ScalewayManagedCluster, retrying: %s", reconcileError.Error())) + return ctrl.Result{RequeueAfter: reconcileError.RequeueAfter()}, nil + } + } + + return ctrl.Result{}, fmt.Errorf("failed to reconcile cluster services: %w", err) + } + + // Infrastructure must be ready before control plane. We should also enqueue + // requests from control plane to infra cluster to keep control plane endpoint accurate. 
+ s.ManagedCluster.Status.Ready = true + s.ManagedCluster.Spec.ControlPlaneEndpoint = s.ManagedControlPlane.Spec.ControlPlaneEndpoint + + return ctrl.Result{}, nil +} + +func (r *ScalewayManagedClusterReconciler) reconcileDelete(ctx context.Context, s *scope.ManagedCluster) (ctrl.Result, error) { + log := logf.FromContext(ctx) + + log.Info("Reconciling ScalewayManagedCluster delete") + + numDependencies, err := r.dependencyCount(ctx, s) + if err != nil { + return ctrl.Result{}, fmt.Errorf("failed to get cluster dependencies: %w", err) + } + if numDependencies > 0 { + log.V(4).Info("Scaleway managed cluster still has dependencies - requeue needed", "dependencyCount", numDependencies) + return ctrl.Result{RequeueAfter: DefaultRetryTime}, nil + } + + if s.ManagedControlPlane != nil { + log.Info("ScalewayManagedControlPlane not deleted yet, retry later") + return ctrl.Result{RequeueAfter: DefaultRetryTime}, nil + } + + managedCluster := s.ManagedCluster + + if err := r.createScalewayManagedClusterService(s).Delete(ctx); err != nil { + // Handle transient errors + var reconcileError *scaleway.ReconcileError + if errors.As(err, &reconcileError) { + if reconcileError.IsTransient() { + log.Info(fmt.Sprintf("Transient failure to reconcile ScalewayManagedCluster, retrying: %s", reconcileError.Error())) + return ctrl.Result{RequeueAfter: reconcileError.RequeueAfter()}, nil + } + } + + return ctrl.Result{}, fmt.Errorf("failed to delete cluster services: %w", err) + } + + // Cluster is deleted so remove the finalizer. + controllerutil.RemoveFinalizer(managedCluster, infrav1.ManagedClusterFinalizer) + + if err := releaseScalewaySecret(ctx, r, managedCluster, managedCluster.Spec.ScalewaySecretName); err != nil { + return ctrl.Result{}, err + } + + return ctrl.Result{}, nil +} + +// SetupWithManager sets up the controller with the Manager. +func (r *ScalewayManagedClusterReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + For(&infrav1.ScalewayManagedCluster{}). + WithEventFilter(predicates.ResourceNotPaused(mgr.GetScheme(), mgr.GetLogger())). + // watch ScalewayManagedControlPlane resources + Watches( + &infrav1.ScalewayManagedControlPlane{}, + handler.EnqueueRequestsFromMapFunc(r.managedControlPlaneMapper()), + ). + // Add a watch on clusterv1.Cluster object for unpause notifications. + Watches( + &clusterv1.Cluster{}, + handler.EnqueueRequestsFromMapFunc(util.ClusterToInfrastructureMapFunc(ctx, infrav1.GroupVersion.WithKind("ScalewayManagedCluster"), mgr.GetClient(), &infrav1.ScalewayManagedCluster{})), + builder.WithPredicates(predicates.ClusterUnpaused(mgr.GetScheme(), mgr.GetLogger())), + ). + Named("scalewaymanagedcluster"). 
+ Complete(r) +} + +func (r *ScalewayManagedClusterReconciler) dependencyCount(ctx context.Context, clusterScope *scope.ManagedCluster) (int, error) { + clusterName, clusterNamespace := clusterScope.ManagedCluster.Name, clusterScope.ManagedCluster.Namespace + + listOptions := []client.ListOption{ + client.InNamespace(clusterNamespace), + client.MatchingLabels(map[string]string{clusterv1.ClusterNameLabel: clusterName}), + } + + managedMachinePools := &infrav1.ScalewayManagedMachinePoolList{} + if err := r.List(ctx, managedMachinePools, listOptions...); err != nil { + return 0, fmt.Errorf("failed to list managed machine pools for cluster %s/%s: %w", clusterNamespace, clusterName, err) + } + + return len(managedMachinePools.Items), nil +} + +func (r *ScalewayManagedClusterReconciler) managedControlPlaneMapper() handler.MapFunc { + return func(ctx context.Context, o client.Object) []ctrl.Request { + log := logf.FromContext(ctx) + + scalewayManagedControlPlane, ok := o.(*infrav1.ScalewayManagedControlPlane) + if !ok { + log.Error(fmt.Errorf("expected a ScalewayManagedControlPlane, got %T instead", o), "failed to map ScalewayManagedControlPlane") + return nil + } + + // Don't handle deleted ScalewayManagedControlPlane + if !scalewayManagedControlPlane.DeletionTimestamp.IsZero() { + return nil + } + + cluster, err := util.GetOwnerCluster(ctx, r.Client, scalewayManagedControlPlane.ObjectMeta) + if err != nil { + log.Error(err, "failed to get owning cluster") + return nil + } + if cluster == nil { + return nil + } + + managedClusterRef := cluster.Spec.InfrastructureRef + if managedClusterRef == nil || managedClusterRef.Kind != "ScalewayManagedCluster" { + return nil + } + + return []ctrl.Request{ + { + NamespacedName: types.NamespacedName{ + Name: managedClusterRef.Name, + Namespace: managedClusterRef.Namespace, + }, + }, + } + } +} diff --git a/internal/controller/scalewaymanagedcluster_controller_test.go b/internal/controller/scalewaymanagedcluster_controller_test.go new file mode 100644 index 0000000..8fd2dc3 --- /dev/null +++ b/internal/controller/scalewaymanagedcluster_controller_test.go @@ -0,0 +1,473 @@ +package controller + +import ( + "context" + "reflect" + "testing" + "time" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + infrav1 "github.com/scaleway/cluster-api-provider-scaleway/api/v1alpha1" + "github.com/scaleway/cluster-api-provider-scaleway/internal/scope" + "github.com/scaleway/scaleway-sdk-go/scw" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + "sigs.k8s.io/controller-runtime/pkg/reconcile" +) + +var _ = Describe("ScalewayManagedCluster Controller", func() { + Context("When reconciling a resource", func() { + const resourceName = "test-resource" + + ctx := context.Background() + + typeNamespacedName := types.NamespacedName{ + Name: resourceName, + Namespace: "default", + } + scalewaymanagedcluster := &infrav1.ScalewayManagedCluster{} + + BeforeEach(func() { + By("creating the custom resource for the Kind ScalewayManagedCluster") + err := k8sClient.Get(ctx, typeNamespacedName, scalewaymanagedcluster) + if err != nil && errors.IsNotFound(err) { + resource := &infrav1.ScalewayManagedCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: resourceName, + Namespace: "default", + }, + Spec: infrav1.ScalewayManagedClusterSpec{ + Region: string(scw.RegionFrPar), + ProjectID: "11111111-1111-1111-1111-111111111111", + ScalewaySecretName: "test-secret", + }, + } + Expect(k8sClient.Create(ctx, resource)).To(Succeed()) + } + }) + + AfterEach(func() { + resource := &infrav1.ScalewayManagedCluster{} + err := k8sClient.Get(ctx, typeNamespacedName, resource) + Expect(err).NotTo(HaveOccurred()) + + By("Cleanup the specific resource instance ScalewayManagedCluster") + Expect(k8sClient.Delete(ctx, resource)).To(Succeed()) + }) + It("should successfully reconcile the resource", func() { + By("Reconciling the created resource") + controllerReconciler := &ScalewayManagedClusterReconciler{ + Client: k8sClient, + createScalewayManagedClusterService: newScalewayManagedClusterService, + } + + _, err := controllerReconciler.Reconcile(ctx, reconcile.Request{ + NamespacedName: typeNamespacedName, + }) + Expect(err).NotTo(HaveOccurred()) + }) + }) +}) + +var _ = Describe("ScalewayManagedCluster", func() { + Context("When updating the resource", func() { + When("Basic cluster", func() { + const resourceName = "test-resource-1" + ctx := context.Background() + + typeNamespacedName := types.NamespacedName{ + Name: resourceName, + Namespace: "default", + } + scalewaymanagedcluster := &infrav1.ScalewayManagedCluster{} + + BeforeEach(func() { + By("creating the custom resource for the Kind ScalewayManagedCluster") + err := k8sClient.Get(ctx, typeNamespacedName, scalewaymanagedcluster) + if err != nil && errors.IsNotFound(err) { + resource := &infrav1.ScalewayManagedCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: resourceName, + Namespace: "default", + }, + Spec: infrav1.ScalewayManagedClusterSpec{ + Region: string(scw.RegionFrPar), + ProjectID: "11111111-1111-1111-1111-111111111111", + ScalewaySecretName: "test-secret", + }, + } + Expect(k8sClient.Create(ctx, resource)).To(Succeed()) + } + }) + + AfterEach(func() { + resource := &infrav1.ScalewayManagedCluster{} + err := k8sClient.Get(ctx, typeNamespacedName, resource) + Expect(err).NotTo(HaveOccurred()) + + By("Cleanup the specific resource instance ScalewayManagedCluster") + Expect(k8sClient.Delete(ctx, resource)).To(Succeed()) + }) + + 
It("should fail to update projectID", func(ctx SpecContext) { + By("Updating the projectID") + resource := &infrav1.ScalewayManagedCluster{} + err := k8sClient.Get(ctx, typeNamespacedName, resource) + Expect(err).NotTo(HaveOccurred()) + + resource.Spec.ProjectID = "11111111-1111-1111-1111-111111111110" + Expect(k8sClient.Update(ctx, resource)).NotTo(Succeed()) + }) + + It("should fail to update region", func(ctx SpecContext) { + By("Updating the region") + resource := &infrav1.ScalewayManagedCluster{} + err := k8sClient.Get(ctx, typeNamespacedName, resource) + Expect(err).NotTo(HaveOccurred()) + + resource.Spec.Region = "nl-ams" + Expect(k8sClient.Update(ctx, resource)).NotTo(Succeed()) + }) + + It("should succeed to update scaleway secret name", func(ctx SpecContext) { + By("Updating the region") + resource := &infrav1.ScalewayManagedCluster{} + err := k8sClient.Get(ctx, typeNamespacedName, resource) + Expect(err).NotTo(HaveOccurred()) + + resource.Spec.ScalewaySecretName = "my-other-secret" + Expect(k8sClient.Update(ctx, resource)).To(Succeed()) + }) + + It("should fail to set private network params", func(ctx SpecContext) { + By("Setting private network params") + resource := &infrav1.ScalewayManagedCluster{} + err := k8sClient.Get(ctx, typeNamespacedName, resource) + Expect(err).NotTo(HaveOccurred()) + + resource.Spec.Network = &infrav1.ManagedNetworkSpec{ + PrivateNetwork: &infrav1.PrivateNetworkParams{ + ID: scw.StringPtr("11111111-1111-1111-1111-111111111111"), + }, + } + Expect(k8sClient.Update(ctx, resource)).NotTo(Succeed()) + }) + }) + + When("ControlPlaneEndpoint is set", func() { + const resourceName = "test-resource-2" + + typeNamespacedName := types.NamespacedName{ + Name: resourceName, + Namespace: "default", + } + scalewaymanagedcluster := &infrav1.ScalewayManagedCluster{} + + BeforeEach(func(ctx SpecContext) { + By("creating the custom resource for the Kind ScalewayManagedCluster") + err := k8sClient.Get(ctx, typeNamespacedName, scalewaymanagedcluster) + if err != nil && errors.IsNotFound(err) { + resource := &infrav1.ScalewayManagedCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: resourceName, + Namespace: "default", + }, + Spec: infrav1.ScalewayManagedClusterSpec{ + ProjectID: "11111111-1111-1111-1111-111111111111", + Region: string(scw.RegionFrPar), + ScalewaySecretName: "secret", + ControlPlaneEndpoint: clusterv1.APIEndpoint{ + Host: "42.42.42.42", + Port: 6443, + }, + }, + } + Expect(k8sClient.Create(ctx, resource)).To(Succeed()) + } + }) + + AfterEach(func(ctx SpecContext) { + resource := &infrav1.ScalewayManagedCluster{} + err := k8sClient.Get(ctx, typeNamespacedName, resource) + Expect(err).NotTo(HaveOccurred()) + + By("Cleanup the specific resource instance ScalewayManagedCluster") + Expect(k8sClient.Delete(ctx, resource)).To(Succeed()) + }) + + It("should fail to update host", func(ctx SpecContext) { + By("Updating the host") + resource := &infrav1.ScalewayManagedCluster{} + err := k8sClient.Get(ctx, typeNamespacedName, resource) + Expect(err).NotTo(HaveOccurred()) + + resource.Spec.ControlPlaneEndpoint.Host = "22.22.22.22" + Expect(k8sClient.Update(ctx, resource)).NotTo(Succeed()) + }) + + It("should fail to update port", func(ctx SpecContext) { + By("Updating the port") + resource := &infrav1.ScalewayManagedCluster{} + err := k8sClient.Get(ctx, typeNamespacedName, resource) + Expect(err).NotTo(HaveOccurred()) + + resource.Spec.ControlPlaneEndpoint.Port = 443 + Expect(k8sClient.Update(ctx, resource)).NotTo(Succeed()) + }) + + It("should fail to remove 
ControlPlaneEndpoint", func(ctx SpecContext) { + By("Removing ControlPlaneEndpoint") + resource := &infrav1.ScalewayManagedCluster{} + err := k8sClient.Get(ctx, typeNamespacedName, resource) + Expect(err).NotTo(HaveOccurred()) + + resource.Spec.ControlPlaneEndpoint = clusterv1.APIEndpoint{} + Expect(k8sClient.Update(ctx, resource)).NotTo(Succeed()) + }) + }) + }) +}) + +var ( + managedEndpoint = clusterv1.APIEndpoint{ + Host: "clusterid.api.k8s.fr-par.scw.cloud", + Port: 6443, + } + scalewayManagedClusterNamespacedName = types.NamespacedName{ + Namespace: "caps", + Name: "scalewaymanagedcluster", + } + scalewayManagedControlPlaneNamespacedName = types.NamespacedName{ + Namespace: "caps", + Name: "scalewaymanagedcontrolplane", + } +) + +func TestScalewayManagedClusterReconciler_Reconcile(t *testing.T) { + t.Parallel() + type fields struct { + createScalewayManagedClusterService scalewayManagedClusterServiceCreator + } + type args struct { + ctx context.Context + req ctrl.Request + } + tests := []struct { + name string + fields fields + args args + want ctrl.Result + wantErr bool + objects []client.Object + asserts func(g *WithT, c client.Client) + }{ + { + name: "should reconcile normally", + fields: fields{ + createScalewayManagedClusterService: func(managedClusterScope *scope.ManagedCluster) *scalewayManagedClusterService { + return &scalewayManagedClusterService{ + scope: managedClusterScope, + Reconcile: func(ctx context.Context) error { return nil }, + Delete: func(ctx context.Context) error { return nil }, + } + }, + }, + args: args{ + ctx: context.TODO(), + req: reconcile.Request{NamespacedName: scalewayManagedClusterNamespacedName}, + }, + want: reconcile.Result{}, + objects: []client.Object{ + &infrav1.ScalewayManagedCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: scalewayManagedClusterNamespacedName.Name, + Namespace: scalewayManagedClusterNamespacedName.Namespace, + OwnerReferences: []metav1.OwnerReference{ + { + Name: clusterNamespacedName.Name, + Kind: "Cluster", + APIVersion: clusterv1.GroupVersion.String(), + }, + }, + }, + Spec: infrav1.ScalewayManagedClusterSpec{ + Region: "fr-par", + ScalewaySecretName: secretNamespacedName.Name, + ProjectID: "11111111-1111-1111-1111-111111111111", + }, + }, + &infrav1.ScalewayManagedControlPlane{ + ObjectMeta: metav1.ObjectMeta{ + Name: scalewayManagedControlPlaneNamespacedName.Name, + Namespace: scalewayManagedControlPlaneNamespacedName.Namespace, + }, + Spec: infrav1.ScalewayManagedControlPlaneSpec{ + Type: "kapsule", + Version: "v1.30.0", + ControlPlaneEndpoint: clusterv1.APIEndpoint{ + Host: managedEndpoint.Host, + Port: managedEndpoint.Port, + }, + }, + }, + &clusterv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: clusterNamespacedName.Name, + Namespace: clusterNamespacedName.Namespace, + }, + Spec: clusterv1.ClusterSpec{ + ControlPlaneRef: &corev1.ObjectReference{ + Name: scalewayManagedControlPlaneNamespacedName.Name, + Namespace: scalewayManagedControlPlaneNamespacedName.Namespace, + }, + }, + }, + &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: secretNamespacedName.Name, + Namespace: secretNamespacedName.Namespace, + }, + Data: map[string][]byte{ + scw.ScwAccessKeyEnv: []byte("SCWXXXXXXXXXXXXXXXXX"), + scw.ScwSecretKeyEnv: []byte("11111111-1111-1111-1111-111111111111"), + }, + }, + }, + asserts: func(g *WithT, c client.Client) { + // ScalewayManagedCluster checks + sc := &infrav1.ScalewayManagedCluster{} + g.Expect(c.Get(context.TODO(), scalewayManagedClusterNamespacedName, sc)).To(Succeed()) + 
g.Expect(sc.Status.Ready).To(BeTrue()) + g.Expect(sc.Spec.ControlPlaneEndpoint.Host).To(Equal(managedEndpoint.Host)) + g.Expect(sc.Spec.ControlPlaneEndpoint.Port).To(Equal(managedEndpoint.Port)) + g.Expect(sc.Finalizers).To(ContainElement(infrav1.ManagedClusterFinalizer)) + + // Secret checks + s := &corev1.Secret{} + g.Expect(c.Get(context.TODO(), secretNamespacedName, s)).To(Succeed()) + g.Expect(s.Finalizers).To(ContainElement(SecretFinalizer)) + g.Expect(s.OwnerReferences).NotTo(BeEmpty()) + }, + }, + { + name: "should reconcile deletion", + fields: fields{ + createScalewayManagedClusterService: func(managedClusterScope *scope.ManagedCluster) *scalewayManagedClusterService { + return &scalewayManagedClusterService{ + scope: managedClusterScope, + Reconcile: func(ctx context.Context) error { return nil }, + Delete: func(ctx context.Context) error { return nil }, + } + }, + }, + args: args{ + ctx: context.TODO(), + req: reconcile.Request{NamespacedName: scalewayManagedClusterNamespacedName}, + }, + want: reconcile.Result{}, + objects: []client.Object{ + &infrav1.ScalewayManagedCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: scalewayManagedClusterNamespacedName.Name, + Namespace: scalewayManagedClusterNamespacedName.Namespace, + OwnerReferences: []metav1.OwnerReference{ + { + Name: clusterNamespacedName.Name, + Kind: "Cluster", + APIVersion: clusterv1.GroupVersion.String(), + }, + }, + Finalizers: []string{infrav1.ManagedClusterFinalizer}, + DeletionTimestamp: &metav1.Time{Time: time.Now()}, + }, + Spec: infrav1.ScalewayManagedClusterSpec{ + Region: "fr-par", + ScalewaySecretName: secretNamespacedName.Name, + ProjectID: "11111111-1111-1111-1111-111111111111", + }, + }, + &clusterv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: clusterNamespacedName.Name, + Namespace: clusterNamespacedName.Namespace, + }, + Spec: clusterv1.ClusterSpec{ + ControlPlaneRef: &corev1.ObjectReference{ + Name: scalewayManagedControlPlaneNamespacedName.Name, + Namespace: scalewayManagedControlPlaneNamespacedName.Namespace, + }, + }, + }, + &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: secretNamespacedName.Name, + Namespace: secretNamespacedName.Namespace, + }, + Data: map[string][]byte{ + scw.ScwAccessKeyEnv: []byte("SCWXXXXXXXXXXXXXXXXX"), + scw.ScwSecretKeyEnv: []byte("11111111-1111-1111-1111-111111111111"), + }, + }, + }, + asserts: func(g *WithT, c client.Client) { + // ScalewayManagedCluster should not exist anymore if the finalizer was correctly removed. + sc := &infrav1.ScalewayManagedCluster{} + g.Expect(c.Get(context.TODO(), scalewayManagedClusterNamespacedName, sc)).NotTo(Succeed()) + + // Secret checks + s := &corev1.Secret{} + g.Expect(c.Get(context.TODO(), secretNamespacedName, s)).To(Succeed()) + g.Expect(s.Finalizers).NotTo(ContainElement(SecretFinalizer)) + g.Expect(s.OwnerReferences).To(BeEmpty()) + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + g := NewWithT(t) + sb := runtime.NewSchemeBuilder( + corev1.AddToScheme, + clusterv1.AddToScheme, + infrav1.AddToScheme, + ) + s := runtime.NewScheme() + + g.Expect(sb.AddToScheme(s)).To(Succeed()) + + runtimeObjects := make([]runtime.Object, 0, len(tt.objects)) + for _, obj := range tt.objects { + runtimeObjects = append(runtimeObjects, obj) + } + + c := fake.NewClientBuilder(). + WithScheme(s). + WithRuntimeObjects(runtimeObjects...). + WithStatusSubresource(tt.objects...). 
+ Build() + + r := &ScalewayManagedClusterReconciler{ + Client: c, + createScalewayManagedClusterService: tt.fields.createScalewayManagedClusterService, + } + got, err := r.Reconcile(tt.args.ctx, tt.args.req) + if (err != nil) != tt.wantErr { + t.Errorf("ScalewayManagedClusterReconciler.Reconcile() error = %v, wantErr %v", err, tt.wantErr) + return + } + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("ScalewayManagedClusterReconciler.Reconcile() = %v, want %v", got, tt.want) + } + tt.asserts(g, c) + }) + } +} diff --git a/internal/controller/scalewaymanagedcluster_reconciler.go b/internal/controller/scalewaymanagedcluster_reconciler.go new file mode 100644 index 0000000..60ed36a --- /dev/null +++ b/internal/controller/scalewaymanagedcluster_reconciler.go @@ -0,0 +1,55 @@ +package controller + +import ( + "context" + "fmt" + + "github.com/scaleway/cluster-api-provider-scaleway/internal/scope" + "github.com/scaleway/cluster-api-provider-scaleway/internal/service/scaleway" + "github.com/scaleway/cluster-api-provider-scaleway/internal/service/scaleway/vpc" + "github.com/scaleway/cluster-api-provider-scaleway/internal/service/scaleway/vpcgw" +) + +type scalewayManagedClusterService struct { + scope *scope.ManagedCluster + // services is the list of services that are reconciled by this controller. + // The order of the services is important as it determines the order in which the services are reconciled. + services []scaleway.ServiceReconciler + Reconcile func(context.Context) error + Delete func(context.Context) error +} + +func newScalewayManagedClusterService(s *scope.ManagedCluster) *scalewayManagedClusterService { + scs := &scalewayManagedClusterService{ + scope: s, + services: []scaleway.ServiceReconciler{ + vpc.New(s), + vpcgw.New(s), + }, + } + + scs.Reconcile = scs.reconcile + scs.Delete = scs.delete + + return scs +} + +func (s *scalewayManagedClusterService) reconcile(ctx context.Context) error { + for _, service := range s.services { + if err := service.Reconcile(ctx); err != nil { + return fmt.Errorf("failed to reconcile ScalewayManagedCluster service %s: %w", service.Name(), err) + } + } + + return nil +} + +func (s *scalewayManagedClusterService) delete(ctx context.Context) error { + for i := len(s.services) - 1; i >= 0; i-- { + if err := s.services[i].Delete(ctx); err != nil { + return fmt.Errorf("failed to delete ScalewayManagedCluster service %s: %w", s.services[i].Name(), err) + } + } + + return nil +} diff --git a/internal/controller/scalewaymanagedcontrolplane_controller.go b/internal/controller/scalewaymanagedcontrolplane_controller.go new file mode 100644 index 0000000..0d5be05 --- /dev/null +++ b/internal/controller/scalewaymanagedcontrolplane_controller.go @@ -0,0 +1,198 @@ +package controller + +import ( + "context" + "errors" + "fmt" + "time" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + "sigs.k8s.io/cluster-api/util" + "sigs.k8s.io/cluster-api/util/annotations" + "sigs.k8s.io/cluster-api/util/predicates" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/builder" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/handler" + logf "sigs.k8s.io/controller-runtime/pkg/log" + + infrav1 "github.com/scaleway/cluster-api-provider-scaleway/api/v1alpha1" + "github.com/scaleway/cluster-api-provider-scaleway/internal/scope" + 
"github.com/scaleway/cluster-api-provider-scaleway/internal/service/scaleway" +) + +// ScalewayManagedControlPlaneReconciler reconciles a ScalewayManagedControlPlane object +type ScalewayManagedControlPlaneReconciler struct { + client.Client + createScalewayManagedControlPlaneService scalewayManagedControlPlaneServiceCreator +} + +// scalewayManagedControlPlaneServiceCreator is a function that creates a new scalewayManagedControlPlaneService reconciler. +type scalewayManagedControlPlaneServiceCreator func(*scope.ManagedControlPlane) *scalewayManagedControlPlaneService + +func NewScalewayManagedControlPlaneReconciler(c client.Client) *ScalewayManagedControlPlaneReconciler { + return &ScalewayManagedControlPlaneReconciler{ + Client: c, + createScalewayManagedControlPlaneService: newScalewayManagedControlPlaneService, + } +} + +// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=scalewaymanagedcontrolplanes,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=scalewaymanagedcontrolplanes/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=scalewaymanagedcontrolplanes/finalizers,verbs=update +// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=scalewaymanagedclusters,verbs=get;list;watch +// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=scalewaymanagedmachinepools,verbs=get;list;watch +// +kubebuilder:rbac:groups="",resources=secrets,verbs=get;list;watch;create;update;delete;patch + +// Reconcile is part of the main kubernetes reconciliation loop which aims to +// move the current state of the cluster closer to the desired state. +func (r *ScalewayManagedControlPlaneReconciler) Reconcile(ctx context.Context, req ctrl.Request) (_ ctrl.Result, retErr error) { + log := logf.FromContext(ctx) + + // Fetch the ScalewayManagedControlPlane instance + managedControlPlane := &infrav1.ScalewayManagedControlPlane{} + if err := r.Get(ctx, req.NamespacedName, managedControlPlane); err != nil { + if apierrors.IsNotFound(err) { + return ctrl.Result{}, nil + } + return ctrl.Result{}, err + } + + // Fetch the Cluster. + cluster, err := util.GetOwnerCluster(ctx, r.Client, managedControlPlane.ObjectMeta) + if err != nil { + return ctrl.Result{}, err + } + if cluster == nil { + log.Info("Cluster Controller has not yet set OwnerRef") + return ctrl.Result{}, nil + } + + log = log.WithValues("cluster", cluster.Name) + + if annotations.IsPaused(cluster, managedControlPlane) { + log.Info("Reconciliation is paused for this object") + return ctrl.Result{}, nil + } + + // Get the managed cluster + managedCluster := &infrav1.ScalewayManagedCluster{} + key := client.ObjectKey{ + Namespace: managedControlPlane.Namespace, + Name: cluster.Spec.InfrastructureRef.Name, + } + + if err := r.Get(ctx, key, managedCluster); err != nil { + log.Error(err, "Failed to retrieve ScalewayManagedCluster from the API Server") + return ctrl.Result{}, err + } + + managedControlPlaneScope, err := scope.NewManagedControlPlane(ctx, &scope.ManagedControlPlaneParams{ + Client: r.Client, + Cluster: cluster, + ManagedCluster: managedCluster, + ManagedControlPlane: managedControlPlane, + }) + if err != nil { + return ctrl.Result{}, fmt.Errorf("failed to create scope: %w", err) + } + + // Always close the scope when exiting this function so we can persist any ScalewayManagedControlPlane changes. 
+ defer func() { + if err := managedControlPlaneScope.Close(ctx); err != nil && retErr == nil { + retErr = err + } + }() + + // Handle deleted clusters + if !managedControlPlane.DeletionTimestamp.IsZero() { + return r.reconcileDelete(ctx, managedControlPlaneScope) + } + + return r.reconcileNormal(ctx, managedControlPlaneScope) +} + +func (r *ScalewayManagedControlPlaneReconciler) reconcileNormal(ctx context.Context, s *scope.ManagedControlPlane) (ctrl.Result, error) { + log := logf.FromContext(ctx) + + log.Info("Reconciling ScalewayManagedControlPlane") + managedControlPlane := s.ManagedControlPlane + + // Register our finalizer immediately to avoid orphaning Scaleway resources on delete + if controllerutil.AddFinalizer(managedControlPlane, infrav1.ManagedControlPlaneFinalizer) { + if err := s.PatchObject(ctx); err != nil { + return ctrl.Result{}, err + } + } + + if !s.ManagedCluster.Status.Ready { + log.Info("ScalewayManagedCluster not ready yet, retry later") + return ctrl.Result{RequeueAfter: time.Second}, nil + } + + if err := r.createScalewayManagedControlPlaneService(s).Reconcile(ctx); err != nil { + // Handle terminal & transient errors + var reconcileError *scaleway.ReconcileError + if errors.As(err, &reconcileError) { + if reconcileError.IsTerminal() { + log.Error(err, "Failed to reconcile ScalewayManagedControlPlane") + return ctrl.Result{}, nil + } else if reconcileError.IsTransient() { + log.Info(fmt.Sprintf("Transient failure to reconcile ScalewayManagedControlPlane, retrying: %s", reconcileError.Error())) + return ctrl.Result{RequeueAfter: reconcileError.RequeueAfter()}, nil + } + } + + return ctrl.Result{}, fmt.Errorf("failed to reconcile cluster services: %w", err) + } + + s.ManagedControlPlane.Status.Initialized = true + s.ManagedControlPlane.Status.Ready = true + s.ManagedControlPlane.Status.ExternalManagedControlPlane = true + s.ManagedControlPlane.Spec.Version = s.FixedVersion() + + return ctrl.Result{}, nil +} + +func (r *ScalewayManagedControlPlaneReconciler) reconcileDelete(ctx context.Context, s *scope.ManagedControlPlane) (ctrl.Result, error) { + log := logf.FromContext(ctx) + + log.Info("Reconciling ScalewayManagedControlPlane delete") + + managedControlPlane := s.ManagedControlPlane + + if err := r.createScalewayManagedControlPlaneService(s).Delete(ctx); err != nil { + // Handle transient errors + var reconcileError *scaleway.ReconcileError + if errors.As(err, &reconcileError) { + if reconcileError.IsTransient() { + log.Info(fmt.Sprintf("Transient failure to reconcile ScalewayManagedControlPlane, retrying: %s", reconcileError.Error())) + return ctrl.Result{RequeueAfter: reconcileError.RequeueAfter()}, nil + } + } + + return ctrl.Result{}, fmt.Errorf("failed to delete cluster services: %w", err) + } + + // Cluster is deleted so remove the finalizer. + controllerutil.RemoveFinalizer(managedControlPlane, infrav1.ManagedControlPlaneFinalizer) + + return ctrl.Result{}, nil +} + +// SetupWithManager sets up the controller with the Manager. +func (r *ScalewayManagedControlPlaneReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + For(&infrav1.ScalewayManagedControlPlane{}). + Named("scalewaymanagedcontrolplane"). + WithEventFilter(predicates.ResourceNotPaused(mgr.GetScheme(), mgr.GetLogger())). + // Add a watch on clusterv1.Cluster object for unpause and infra ready notifications. 
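The terminal/transient classification of *scaleway.ReconcileError above is repeated in the normal and delete paths of all three managed controllers. Purely as an illustration of the shared shape (the controllers inline it rather than calling a helper; this fragment relies on the errors, ctrl and scaleway imports already present in this file):

    // resultForServiceError is a hypothetical helper mirroring the branch in
    // reconcileNormal above: terminal errors stop retries, transient errors requeue.
    func resultForServiceError(err error) (ctrl.Result, error) {
        var reconcileError *scaleway.ReconcileError
        if errors.As(err, &reconcileError) {
            if reconcileError.IsTerminal() {
                return ctrl.Result{}, nil
            }
            if reconcileError.IsTransient() {
                return ctrl.Result{RequeueAfter: reconcileError.RequeueAfter()}, nil
            }
        }
        return ctrl.Result{}, err
    }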
+ Watches( + &clusterv1.Cluster{}, + handler.EnqueueRequestsFromMapFunc(util.ClusterToInfrastructureMapFunc(ctx, infrav1.GroupVersion.WithKind("ScalewayManagedControlPlane"), mgr.GetClient(), &infrav1.ScalewayManagedControlPlane{})), + builder.WithPredicates(predicates.ClusterPausedTransitionsOrInfrastructureReady(mgr.GetScheme(), mgr.GetLogger())), + ). + Complete(r) +} diff --git a/internal/controller/scalewaymanagedcontrolplane_controller_test.go b/internal/controller/scalewaymanagedcontrolplane_controller_test.go new file mode 100644 index 0000000..c7f43b0 --- /dev/null +++ b/internal/controller/scalewaymanagedcontrolplane_controller_test.go @@ -0,0 +1,451 @@ +package controller + +import ( + "context" + "reflect" + "testing" + "time" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + infrav1 "github.com/scaleway/cluster-api-provider-scaleway/api/v1alpha1" + "github.com/scaleway/cluster-api-provider-scaleway/internal/scope" + "github.com/scaleway/scaleway-sdk-go/scw" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + "sigs.k8s.io/controller-runtime/pkg/reconcile" +) + +var _ = Describe("ScalewayManagedControlPlane Controller", func() { + Context("When reconciling a resource", func() { + const resourceName = "test-resource" + + ctx := context.Background() + + typeNamespacedName := types.NamespacedName{ + Name: resourceName, + Namespace: "default", + } + scalewaymanagedcontrolplane := &infrav1.ScalewayManagedControlPlane{} + + BeforeEach(func() { + By("creating the custom resource for the Kind ScalewayManagedControlPlane") + err := k8sClient.Get(ctx, typeNamespacedName, scalewaymanagedcontrolplane) + if err != nil && errors.IsNotFound(err) { + resource := &infrav1.ScalewayManagedControlPlane{ + ObjectMeta: metav1.ObjectMeta{ + Name: resourceName, + Namespace: "default", + }, + Spec: infrav1.ScalewayManagedControlPlaneSpec{ + Type: "kapsule", + Version: "v1.30.0", + }, + } + Expect(k8sClient.Create(ctx, resource)).To(Succeed()) + } + }) + + AfterEach(func() { + resource := &infrav1.ScalewayManagedControlPlane{} + err := k8sClient.Get(ctx, typeNamespacedName, resource) + Expect(err).NotTo(HaveOccurred()) + + By("Cleanup the specific resource instance ScalewayManagedControlPlane") + Expect(k8sClient.Delete(ctx, resource)).To(Succeed()) + }) + It("should successfully reconcile the resource", func() { + By("Reconciling the created resource") + controllerReconciler := &ScalewayManagedControlPlaneReconciler{ + Client: k8sClient, + createScalewayManagedControlPlaneService: newScalewayManagedControlPlaneService, + } + + _, err := controllerReconciler.Reconcile(ctx, reconcile.Request{ + NamespacedName: typeNamespacedName, + }) + Expect(err).NotTo(HaveOccurred()) + }) + }) +}) + +var _ = Describe("ScalewayManagedControlPlane", func() { + Context("When updating the resource", func() { + When("Basic control plane", func() { + const resourceName = "test-resource-1" + ctx := context.Background() + + typeNamespacedName := types.NamespacedName{ + Name: resourceName, + Namespace: "default", + } + scalewaymanagedcontrolplane := &infrav1.ScalewayManagedControlPlane{} + + BeforeEach(func() { + By("creating the custom resource for the Kind ScalewayManagedControlPlane") + err 
:= k8sClient.Get(ctx, typeNamespacedName, scalewaymanagedcontrolplane) + if err != nil && errors.IsNotFound(err) { + resource := &infrav1.ScalewayManagedControlPlane{ + ObjectMeta: metav1.ObjectMeta{ + Name: resourceName, + Namespace: "default", + }, + Spec: infrav1.ScalewayManagedControlPlaneSpec{ + Type: "kapsule", + Version: "v1.30.0", + }, + } + Expect(k8sClient.Create(ctx, resource)).To(Succeed()) + } + }) + + AfterEach(func() { + resource := &infrav1.ScalewayManagedControlPlane{} + err := k8sClient.Get(ctx, typeNamespacedName, resource) + Expect(err).NotTo(HaveOccurred()) + + By("Cleanup the specific resource instance ScalewayManagedControlPlane") + Expect(k8sClient.Delete(ctx, resource)).To(Succeed()) + }) + + It("should fail to set CNI", func(ctx SpecContext) { + By("Setting CNI") + resource := &infrav1.ScalewayManagedControlPlane{} + err := k8sClient.Get(ctx, typeNamespacedName, resource) + Expect(err).NotTo(HaveOccurred()) + + resource.Spec.CNI = scw.StringPtr("calico") + Expect(k8sClient.Update(ctx, resource)).NotTo(Succeed()) + }) + }) + + When("ControlPlaneEndpoint is set", func() { + const resourceName = "test-resource-2" + + typeNamespacedName := types.NamespacedName{ + Name: resourceName, + Namespace: "default", + } + scalewaymanagedcontrolplane := &infrav1.ScalewayManagedControlPlane{} + + BeforeEach(func(ctx SpecContext) { + By("creating the custom resource for the Kind ScalewayManagedControlPlane") + err := k8sClient.Get(ctx, typeNamespacedName, scalewaymanagedcontrolplane) + if err != nil && errors.IsNotFound(err) { + resource := &infrav1.ScalewayManagedControlPlane{ + ObjectMeta: metav1.ObjectMeta{ + Name: resourceName, + Namespace: "default", + }, + Spec: infrav1.ScalewayManagedControlPlaneSpec{ + Type: "kapsule", + Version: "v1.30.0", + ControlPlaneEndpoint: clusterv1.APIEndpoint{ + Host: "42.42.42.42", + Port: 6443, + }, + }, + } + Expect(k8sClient.Create(ctx, resource)).To(Succeed()) + } + }) + + AfterEach(func(ctx SpecContext) { + resource := &infrav1.ScalewayManagedControlPlane{} + err := k8sClient.Get(ctx, typeNamespacedName, resource) + Expect(err).NotTo(HaveOccurred()) + + By("Cleanup the specific resource instance ScalewayManagedControlPlane") + Expect(k8sClient.Delete(ctx, resource)).To(Succeed()) + }) + + It("should fail to update host", func(ctx SpecContext) { + By("Updating the host") + resource := &infrav1.ScalewayManagedControlPlane{} + err := k8sClient.Get(ctx, typeNamespacedName, resource) + Expect(err).NotTo(HaveOccurred()) + + resource.Spec.ControlPlaneEndpoint.Host = "33.33.33.33" + Expect(k8sClient.Update(ctx, resource)).NotTo(Succeed()) + }) + + It("should fail to update port", func(ctx SpecContext) { + By("Updating the port") + resource := &infrav1.ScalewayManagedControlPlane{} + err := k8sClient.Get(ctx, typeNamespacedName, resource) + Expect(err).NotTo(HaveOccurred()) + + resource.Spec.ControlPlaneEndpoint.Port = 443 + Expect(k8sClient.Update(ctx, resource)).NotTo(Succeed()) + }) + + It("should fail to remove ControlPlaneEndpoint", func(ctx SpecContext) { + By("Removing ControlPlaneEndpoint") + resource := &infrav1.ScalewayManagedControlPlane{} + err := k8sClient.Get(ctx, typeNamespacedName, resource) + Expect(err).NotTo(HaveOccurred()) + + resource.Spec.ControlPlaneEndpoint = clusterv1.APIEndpoint{} + Expect(k8sClient.Update(ctx, resource)).NotTo(Succeed()) + }) + }) + }) +}) + +func TestScalewayManagedControlPlaneReconciler_Reconcile(t *testing.T) { + t.Parallel() + type fields struct { + createScalewayManagedControlPlaneService 
scalewayManagedControlPlaneServiceCreator + } + type args struct { + ctx context.Context + req ctrl.Request + } + tests := []struct { + name string + fields fields + args args + want ctrl.Result + wantErr bool + objects []client.Object + asserts func(g *WithT, c client.Client) + }{ + { + name: "should reconcile normally", + fields: fields{ + createScalewayManagedControlPlaneService: func(managedControlPlaneScope *scope.ManagedControlPlane) *scalewayManagedControlPlaneService { + return &scalewayManagedControlPlaneService{ + scope: managedControlPlaneScope, + Reconcile: func(ctx context.Context) error { return nil }, + Delete: func(ctx context.Context) error { return nil }, + } + }, + }, + args: args{ + ctx: context.TODO(), + req: reconcile.Request{NamespacedName: scalewayManagedControlPlaneNamespacedName}, + }, + want: reconcile.Result{}, + objects: []client.Object{ + &infrav1.ScalewayManagedCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: scalewayManagedClusterNamespacedName.Name, + Namespace: scalewayManagedClusterNamespacedName.Namespace, + OwnerReferences: []metav1.OwnerReference{ + { + Name: clusterNamespacedName.Name, + Kind: "Cluster", + APIVersion: clusterv1.GroupVersion.String(), + }, + }, + }, + Spec: infrav1.ScalewayManagedClusterSpec{ + Region: "fr-par", + ScalewaySecretName: secretNamespacedName.Name, + ProjectID: "11111111-1111-1111-1111-111111111111", + }, + Status: infrav1.ScalewayManagedClusterStatus{ + Ready: true, + }, + }, + &infrav1.ScalewayManagedControlPlane{ + ObjectMeta: metav1.ObjectMeta{ + Name: scalewayManagedControlPlaneNamespacedName.Name, + Namespace: scalewayManagedControlPlaneNamespacedName.Namespace, + OwnerReferences: []metav1.OwnerReference{ + { + Name: clusterNamespacedName.Name, + Kind: "Cluster", + APIVersion: clusterv1.GroupVersion.String(), + }, + }, + }, + Spec: infrav1.ScalewayManagedControlPlaneSpec{ + Type: "kapsule", + Version: "v1.30.0", + ControlPlaneEndpoint: clusterv1.APIEndpoint{ + Host: managedEndpoint.Host, + Port: managedEndpoint.Port, + }, + }, + }, + &clusterv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: clusterNamespacedName.Name, + Namespace: clusterNamespacedName.Namespace, + }, + Spec: clusterv1.ClusterSpec{ + ControlPlaneRef: &corev1.ObjectReference{ + Name: scalewayManagedControlPlaneNamespacedName.Name, + Namespace: scalewayManagedControlPlaneNamespacedName.Namespace, + }, + InfrastructureRef: &corev1.ObjectReference{ + Name: scalewayManagedClusterNamespacedName.Name, + Namespace: scalewayManagedClusterNamespacedName.Namespace, + }, + }, + }, + &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: secretNamespacedName.Name, + Namespace: secretNamespacedName.Namespace, + }, + Data: map[string][]byte{ + scw.ScwAccessKeyEnv: []byte("SCWXXXXXXXXXXXXXXXXX"), + scw.ScwSecretKeyEnv: []byte("11111111-1111-1111-1111-111111111111"), + }, + }, + }, + asserts: func(g *WithT, c client.Client) { + // ScalewayManagedControlPlane checks + smcp := &infrav1.ScalewayManagedControlPlane{} + g.Expect(c.Get(context.TODO(), scalewayManagedControlPlaneNamespacedName, smcp)).To(Succeed()) + g.Expect(smcp.Status.Ready).To(BeTrue()) + g.Expect(smcp.Status.Initialized).To(BeTrue()) + g.Expect(smcp.Status.ExternalManagedControlPlane).To(BeTrue()) + g.Expect(smcp.Finalizers).To(ContainElement(infrav1.ManagedControlPlaneFinalizer)) + }, + }, + { + name: "should reconcile deletion", + fields: fields{ + createScalewayManagedControlPlaneService: func(managedControlPlaneScope *scope.ManagedControlPlane) *scalewayManagedControlPlaneService { + return 
&scalewayManagedControlPlaneService{ + scope: managedControlPlaneScope, + Reconcile: func(ctx context.Context) error { return nil }, + Delete: func(ctx context.Context) error { return nil }, + } + }, + }, + args: args{ + ctx: context.TODO(), + req: reconcile.Request{NamespacedName: scalewayManagedControlPlaneNamespacedName}, + }, + want: reconcile.Result{}, + objects: []client.Object{ + &infrav1.ScalewayManagedCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: scalewayManagedClusterNamespacedName.Name, + Namespace: scalewayManagedClusterNamespacedName.Namespace, + OwnerReferences: []metav1.OwnerReference{ + { + Name: clusterNamespacedName.Name, + Kind: "Cluster", + APIVersion: clusterv1.GroupVersion.String(), + }, + }, + }, + Spec: infrav1.ScalewayManagedClusterSpec{ + Region: "fr-par", + ScalewaySecretName: secretNamespacedName.Name, + ProjectID: "11111111-1111-1111-1111-111111111111", + }, + Status: infrav1.ScalewayManagedClusterStatus{ + Ready: true, + }, + }, + &infrav1.ScalewayManagedControlPlane{ + ObjectMeta: metav1.ObjectMeta{ + Name: scalewayManagedControlPlaneNamespacedName.Name, + Namespace: scalewayManagedControlPlaneNamespacedName.Namespace, + OwnerReferences: []metav1.OwnerReference{ + { + Name: clusterNamespacedName.Name, + Kind: "Cluster", + APIVersion: clusterv1.GroupVersion.String(), + }, + }, + Finalizers: []string{infrav1.ManagedControlPlaneFinalizer}, + DeletionTimestamp: &metav1.Time{Time: time.Now()}, + }, + Spec: infrav1.ScalewayManagedControlPlaneSpec{ + Type: "kapsule", + Version: "v1.30.0", + ControlPlaneEndpoint: clusterv1.APIEndpoint{ + Host: managedEndpoint.Host, + Port: managedEndpoint.Port, + }, + }, + }, + &clusterv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: clusterNamespacedName.Name, + Namespace: clusterNamespacedName.Namespace, + }, + Spec: clusterv1.ClusterSpec{ + ControlPlaneRef: &corev1.ObjectReference{ + Name: scalewayManagedControlPlaneNamespacedName.Name, + Namespace: scalewayManagedControlPlaneNamespacedName.Namespace, + }, + InfrastructureRef: &corev1.ObjectReference{ + Name: scalewayManagedClusterNamespacedName.Name, + Namespace: scalewayManagedClusterNamespacedName.Namespace, + }, + }, + }, + &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: secretNamespacedName.Name, + Namespace: secretNamespacedName.Namespace, + }, + Data: map[string][]byte{ + scw.ScwAccessKeyEnv: []byte("SCWXXXXXXXXXXXXXXXXX"), + scw.ScwSecretKeyEnv: []byte("11111111-1111-1111-1111-111111111111"), + }, + }, + }, + asserts: func(g *WithT, c client.Client) { + // ScalewayManagedControlPlane should not exist anymore if the finalizer was correctly removed. + smcp := &infrav1.ScalewayManagedControlPlane{} + g.Expect(c.Get(context.TODO(), scalewayManagedControlPlaneNamespacedName, smcp)).NotTo(Succeed()) + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + g := NewWithT(t) + sb := runtime.NewSchemeBuilder( + corev1.AddToScheme, + clusterv1.AddToScheme, + infrav1.AddToScheme, + ) + s := runtime.NewScheme() + + g.Expect(sb.AddToScheme(s)).To(Succeed()) + + runtimeObjects := make([]runtime.Object, 0, len(tt.objects)) + for _, obj := range tt.objects { + runtimeObjects = append(runtimeObjects, obj) + } + + c := fake.NewClientBuilder(). + WithScheme(s). + WithRuntimeObjects(runtimeObjects...). + WithStatusSubresource(tt.objects...). 
+ Build() + + r := &ScalewayManagedControlPlaneReconciler{ + Client: c, + createScalewayManagedControlPlaneService: tt.fields.createScalewayManagedControlPlaneService, + } + got, err := r.Reconcile(tt.args.ctx, tt.args.req) + if (err != nil) != tt.wantErr { + t.Errorf("ScalewayManagedControlPlaneReconciler.Reconcile() error = %v, wantErr %v", err, tt.wantErr) + return + } + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("ScalewayManagedControlPlaneReconciler.Reconcile() = %v, want %v", got, tt.want) + } + tt.asserts(g, c) + }) + } +} diff --git a/internal/controller/scalewaymanagedcontrolplane_reconciler.go b/internal/controller/scalewaymanagedcontrolplane_reconciler.go new file mode 100644 index 0000000..e7f2c1d --- /dev/null +++ b/internal/controller/scalewaymanagedcontrolplane_reconciler.go @@ -0,0 +1,53 @@ +package controller + +import ( + "context" + "fmt" + + "github.com/scaleway/cluster-api-provider-scaleway/internal/scope" + "github.com/scaleway/cluster-api-provider-scaleway/internal/service/scaleway" + "github.com/scaleway/cluster-api-provider-scaleway/internal/service/scaleway/k8s/cluster" +) + +type scalewayManagedControlPlaneService struct { + scope *scope.ManagedControlPlane + // services is the list of services that are reconciled by this controller. + // The order of the services is important as it determines the order in which the services are reconciled. + services []scaleway.ServiceReconciler + Reconcile func(context.Context) error + Delete func(context.Context) error +} + +func newScalewayManagedControlPlaneService(s *scope.ManagedControlPlane) *scalewayManagedControlPlaneService { + scs := &scalewayManagedControlPlaneService{ + scope: s, + services: []scaleway.ServiceReconciler{ + cluster.New(s), + }, + } + + scs.Reconcile = scs.reconcile + scs.Delete = scs.delete + + return scs +} + +func (s *scalewayManagedControlPlaneService) reconcile(ctx context.Context) error { + for _, service := range s.services { + if err := service.Reconcile(ctx); err != nil { + return fmt.Errorf("failed to reconcile ScalewayManagedControlPlane service %s: %w", service.Name(), err) + } + } + + return nil +} + +func (s *scalewayManagedControlPlaneService) delete(ctx context.Context) error { + for i := len(s.services) - 1; i >= 0; i-- { + if err := s.services[i].Delete(ctx); err != nil { + return fmt.Errorf("failed to delete ScalewayManagedControlPlane service %s: %w", s.services[i].Name(), err) + } + } + + return nil +} diff --git a/internal/controller/scalewaymanagedmachinepool_controller.go b/internal/controller/scalewaymanagedmachinepool_controller.go new file mode 100644 index 0000000..f2b2ec5 --- /dev/null +++ b/internal/controller/scalewaymanagedmachinepool_controller.go @@ -0,0 +1,338 @@ +package controller + +import ( + "context" + "errors" + "fmt" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + expclusterv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" + "sigs.k8s.io/cluster-api/util" + "sigs.k8s.io/cluster-api/util/annotations" + "sigs.k8s.io/cluster-api/util/predicates" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/builder" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/handler" + logf "sigs.k8s.io/controller-runtime/pkg/log" + + infrav1 "github.com/scaleway/cluster-api-provider-scaleway/api/v1alpha1" + 
"github.com/scaleway/cluster-api-provider-scaleway/internal/scope" + "github.com/scaleway/cluster-api-provider-scaleway/internal/service/scaleway" +) + +// ScalewayManagedMachinePoolReconciler reconciles a ScalewayManagedMachinePool object +type ScalewayManagedMachinePoolReconciler struct { + client.Client + createScalewayManagedMachinePoolService scalewayManagedMachinePoolServiceCreator +} + +// scalewayManagedControlPlaneServiceCreator is a function that creates a new scalewayManagedControlPlaneService reconciler. +type scalewayManagedMachinePoolServiceCreator func(*scope.ManagedMachinePool) *scalewayManagedMachinePoolService + +func NewScalewayManagedMachinePoolReconciler(c client.Client) *ScalewayManagedMachinePoolReconciler { + return &ScalewayManagedMachinePoolReconciler{ + Client: c, + createScalewayManagedMachinePoolService: newScalewayManagedMachinePoolService, + } +} + +// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=scalewaymanagedmachinepools,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=scalewaymanagedmachinepools/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io,resources=scalewaymanagedmachinepools/finalizers,verbs=update +// +kubebuilder:rbac:groups=cluster.x-k8s.io,resources=machinepools;machinepools/status,verbs=get;list;watch +// +kubebuilder:rbac:groups=cluster.x-k8s.io,resources=clusters;clusters/status,verbs=get;list;watch + +// Reconcile is part of the main kubernetes reconciliation loop which aims to +// move the current state of the cluster closer to the desired state. +func (r *ScalewayManagedMachinePoolReconciler) Reconcile(ctx context.Context, req ctrl.Request) (_ ctrl.Result, retErr error) { + log := logf.FromContext(ctx) + + // Get the managed machine pool + managedMachinePool := &infrav1.ScalewayManagedMachinePool{} + if err := r.Get(ctx, req.NamespacedName, managedMachinePool); err != nil { + if apierrors.IsNotFound(err) { + return ctrl.Result{}, nil + } + return ctrl.Result{}, err + } + + // Get the machine pool + machinePool, err := getOwnerMachinePool(ctx, r.Client, managedMachinePool.ObjectMeta) + if err != nil { + return ctrl.Result{}, err + } + if machinePool == nil { + return ctrl.Result{}, nil + } + + // Get the cluster + cluster, err := util.GetClusterFromMetadata(ctx, r.Client, machinePool.ObjectMeta) + if err != nil { + log.Info("Failed to retrieve Cluster from MachinePool") + return ctrl.Result{}, err + } + if annotations.IsPaused(cluster, managedMachinePool) { + log.Info("Reconciliation is paused for this object") + return ctrl.Result{}, nil + } + + // Get the managed cluster + managedClusterKey := client.ObjectKey{ + Namespace: managedMachinePool.Namespace, + Name: cluster.Spec.InfrastructureRef.Name, + } + managedCluster := &infrav1.ScalewayManagedCluster{} + if err := r.Get(ctx, managedClusterKey, managedCluster); err != nil { + return ctrl.Result{}, err + } + + // Get the managed control plane + managedControlPlaneKey := client.ObjectKey{ + Namespace: managedMachinePool.Namespace, + Name: cluster.Spec.ControlPlaneRef.Name, + } + managedControlPlane := &infrav1.ScalewayManagedControlPlane{} + if err := r.Get(ctx, managedControlPlaneKey, managedControlPlane); err != nil { + log.Info("Failed to retrieve ManagedControlPlane from ManagedMachinePool") + return ctrl.Result{}, nil + } + + managedMachinePoolScope, err := scope.NewManagedMachinePool(ctx, &scope.ManagedMachinePoolParams{ + Client: r.Client, + Cluster: 
cluster, + MachinePool: machinePool, + ManagedCluster: managedCluster, + ManagedControlPlane: managedControlPlane, + ManagedMachinePool: managedMachinePool, + }) + if err != nil { + return ctrl.Result{}, fmt.Errorf("failed to create scope: %w", err) + } + + // Always close the scope when exiting this function so we can persist any ScalewayManagedMachinePool changes. + defer func() { + if err := managedMachinePoolScope.Close(ctx); err != nil && retErr == nil { + retErr = err + } + }() + + // Handle deleted machine pool + if !managedMachinePool.DeletionTimestamp.IsZero() { + return r.reconcileDelete(ctx, managedMachinePoolScope) + } + + // Handle non-deleted machine pool + return r.reconcileNormal(ctx, managedMachinePoolScope) +} + +func (r *ScalewayManagedMachinePoolReconciler) reconcileNormal(ctx context.Context, s *scope.ManagedMachinePool) (ctrl.Result, error) { + log := logf.FromContext(ctx) + + log.Info("Reconciling ScalewayManagedMachinePool") + managedMachinePool := s.ManagedMachinePool + + // Register our finalizer immediately to avoid orphaning Scaleway resources on delete + if controllerutil.AddFinalizer(managedMachinePool, infrav1.ManagedMachinePoolFinalizer) { + if err := s.PatchObject(ctx); err != nil { + return ctrl.Result{}, err + } + } + + if err := r.createScalewayManagedMachinePoolService(s).Reconcile(ctx); err != nil { + // Handle terminal & transient errors + var reconcileError *scaleway.ReconcileError + if errors.As(err, &reconcileError) { + if reconcileError.IsTerminal() { + log.Error(err, "Failed to reconcile ScalewayManagedMachinePool") + return ctrl.Result{}, nil + } else if reconcileError.IsTransient() { + log.Info(fmt.Sprintf("Transient failure to reconcile ScalewayManagedMachinePool, retrying: %s", reconcileError.Error())) + return ctrl.Result{RequeueAfter: reconcileError.RequeueAfter()}, nil + } + } + + return ctrl.Result{}, fmt.Errorf("failed to reconcile cluster services: %w", err) + } + + s.ManagedMachinePool.Status.Ready = true + + return ctrl.Result{}, nil +} + +func (r *ScalewayManagedMachinePoolReconciler) reconcileDelete(ctx context.Context, s *scope.ManagedMachinePool) (ctrl.Result, error) { + log := logf.FromContext(ctx) + + log.Info("Reconciling ScalewayManagedMachinePool delete") + + managedMachinePool := s.ManagedMachinePool + + if err := r.createScalewayManagedMachinePoolService(s).Delete(ctx); err != nil { + // Handle transient errors + var reconcileError *scaleway.ReconcileError + if errors.As(err, &reconcileError) { + if reconcileError.IsTransient() { + log.Info(fmt.Sprintf("Transient failure to reconcile ScalewayManagedMachinePool, retrying: %s", reconcileError.Error())) + return ctrl.Result{RequeueAfter: reconcileError.RequeueAfter()}, nil + } + } + + return ctrl.Result{}, fmt.Errorf("failed to delete services: %w", err) + } + + // Pool is deleted so remove the finalizer. + controllerutil.RemoveFinalizer(managedMachinePool, infrav1.ManagedMachinePoolFinalizer) + + return ctrl.Result{}, nil +} + +// SetupWithManager sets up the controller with the Manager. +func (r *ScalewayManagedMachinePoolReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager) error { + scalewayManagedMachinePoolMapper, err := util.ClusterToTypedObjectsMapper(r.Client, &infrav1.ScalewayManagedMachinePoolList{}, mgr.GetScheme()) + if err != nil { + return fmt.Errorf("failed to create mapper for Cluster to ScalewayManagedMachinePools: %w", err) + } + + return ctrl.NewControllerManagedBy(mgr). + For(&infrav1.ScalewayManagedMachinePool{}). 
+ Named("scalewaymanagedmachinepool"). + WithEventFilter(predicates.ResourceNotPaused(mgr.GetScheme(), mgr.GetLogger())). + // watch for changes in CAPI MachinePool resources + Watches( + &expclusterv1.MachinePool{}, + handler.EnqueueRequestsFromMapFunc(machinePoolToInfrastructureMapFunc(infrav1.GroupVersion.WithKind("ScalewayManagedMachinePool"))), + ). + // watch for changes in ScalewayManagedControlPlanes + Watches( + &infrav1.ScalewayManagedControlPlane{}, + handler.EnqueueRequestsFromMapFunc(managedControlPlaneToManagedMachinePoolMapFunc(ctx, mgr.GetClient(), infrav1.GroupVersion.WithKind("ScalewayManagedMachinePool"))), + ). + // Add a watch on clusterv1.Cluster object for pause/unpause & ready notifications. + Watches( + &clusterv1.Cluster{}, + handler.EnqueueRequestsFromMapFunc(scalewayManagedMachinePoolMapper), + builder.WithPredicates(predicates.ClusterPausedTransitionsOrInfrastructureReady(mgr.GetScheme(), mgr.GetLogger())), + ). + Complete(r) +} + +// getOwnerMachinePool returns the MachinePool object owning the current resource. +func getOwnerMachinePool(ctx context.Context, c client.Client, obj metav1.ObjectMeta) (*expclusterv1.MachinePool, error) { + for _, ref := range obj.OwnerReferences { + if ref.Kind != "MachinePool" { + continue + } + gv, err := schema.ParseGroupVersion(ref.APIVersion) + if err != nil { + return nil, fmt.Errorf("failed to parse group version: %w", err) + } + if gv.Group == expclusterv1.GroupVersion.Group { + return getMachinePoolByName(ctx, c, obj.Namespace, ref.Name) + } + } + return nil, nil +} + +// getMachinePoolByName finds and return a Machine object using the specified params. +func getMachinePoolByName(ctx context.Context, c client.Client, namespace, name string) (*expclusterv1.MachinePool, error) { + m := &expclusterv1.MachinePool{} + key := client.ObjectKey{Name: name, Namespace: namespace} + if err := c.Get(ctx, key, m); err != nil { + return nil, err + } + return m, nil +} + +// machinePoolToInfrastructureMapFunc returns a handler.MapFunc that watches for +// MachinePool events and returns reconciliation requests for an infrastructure provider object. +func machinePoolToInfrastructureMapFunc(gvk schema.GroupVersionKind) handler.MapFunc { + return func(_ context.Context, o client.Object) []ctrl.Request { + m, ok := o.(*expclusterv1.MachinePool) + if !ok { + return nil + } + + gk := gvk.GroupKind() + ref := m.Spec.Template.Spec.InfrastructureRef + // Return early if the GroupKind doesn't match what we expect. + infraGK := ref.GroupVersionKind().GroupKind() + if gk != infraGK { + return nil + } + + return []ctrl.Request{ + { + NamespacedName: client.ObjectKey{ + Namespace: m.Namespace, + Name: ref.Name, + }, + }, + } + } +} + +// getOwnerClusterKey returns only the Cluster name and namespace. 
+func getOwnerClusterKey(obj metav1.ObjectMeta) (*client.ObjectKey, error) { + for _, ref := range obj.OwnerReferences { + if ref.Kind != "Cluster" { + continue + } + gv, err := schema.ParseGroupVersion(ref.APIVersion) + if err != nil { + return nil, fmt.Errorf("failed to parse group version: %w", err) + } + if gv.Group == clusterv1.GroupVersion.Group { + return &client.ObjectKey{ + Namespace: obj.Namespace, + Name: ref.Name, + }, nil + } + } + return nil, nil +} + +func managedControlPlaneToManagedMachinePoolMapFunc(ctx context.Context, c client.Client, gvk schema.GroupVersionKind) handler.MapFunc { + log := logf.FromContext(ctx) + + return func(ctx context.Context, o client.Object) []ctrl.Request { + scalewayManagedControlPlane, ok := o.(*infrav1.ScalewayManagedControlPlane) + if !ok { + panic(fmt.Sprintf("Expected a ScalewayManagedControlPlane but got a %T", o)) + } + + if !scalewayManagedControlPlane.DeletionTimestamp.IsZero() { + return nil + } + + clusterKey, err := getOwnerClusterKey(scalewayManagedControlPlane.ObjectMeta) + if err != nil { + log.Error(err, "couldn't get ScalewayManagedControlPlane owner ObjectKey") + return nil + } + if clusterKey == nil { + return nil + } + + managedPoolForClusterList := expclusterv1.MachinePoolList{} + if err := c.List( + ctx, &managedPoolForClusterList, client.InNamespace(clusterKey.Namespace), client.MatchingLabels{clusterv1.ClusterNameLabel: clusterKey.Name}, + ); err != nil { + log.Error(err, "couldn't list pools for cluster") + return nil + } + + mapFunc := machinePoolToInfrastructureMapFunc(gvk) + + var results []ctrl.Request + for i := range managedPoolForClusterList.Items { + managedPool := mapFunc(ctx, &managedPoolForClusterList.Items[i]) + results = append(results, managedPool...) + } + + return results + } +} diff --git a/internal/controller/scalewaymanagedmachinepool_controller_test.go b/internal/controller/scalewaymanagedmachinepool_controller_test.go new file mode 100644 index 0000000..997e6d8 --- /dev/null +++ b/internal/controller/scalewaymanagedmachinepool_controller_test.go @@ -0,0 +1,477 @@ +package controller + +import ( + "context" + "reflect" + "testing" + "time" + + . "github.com/onsi/ginkgo/v2" + . 
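The map functions above translate watch events into ScalewayManagedMachinePool reconcile requests. A small usage sketch of machinePoolToInfrastructureMapFunc follows; the object names are illustrative and the fragment assumes the usual context, corev1, metav1, clusterv1, expclusterv1 and infrav1 imports. A request is produced only when the infrastructureRef's GroupKind matches the GroupVersionKind passed in.

    mapFn := machinePoolToInfrastructureMapFunc(infrav1.GroupVersion.WithKind("ScalewayManagedMachinePool"))
    mp := &expclusterv1.MachinePool{
        ObjectMeta: metav1.ObjectMeta{Name: "pool-a", Namespace: "default"},
        Spec: expclusterv1.MachinePoolSpec{
            Template: clusterv1.MachineTemplateSpec{
                Spec: clusterv1.MachineSpec{
                    InfrastructureRef: corev1.ObjectReference{
                        APIVersion: infrav1.GroupVersion.String(),
                        Kind:       "ScalewayManagedMachinePool",
                        Name:       "pool-a",
                    },
                },
            },
        },
    }
    // Exactly one request is returned here: default/pool-a.
    reqs := mapFn(context.TODO(), mp)
    _ = reqs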
"github.com/onsi/gomega" + infrav1 "github.com/scaleway/cluster-api-provider-scaleway/api/v1alpha1" + "github.com/scaleway/cluster-api-provider-scaleway/internal/scope" + "github.com/scaleway/scaleway-sdk-go/scw" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + expclusterv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + "sigs.k8s.io/controller-runtime/pkg/reconcile" +) + +var _ = Describe("ScalewayManagedMachinePool Controller", func() { + Context("When reconciling a resource", func() { + const resourceName = "test-resource" + + ctx := context.Background() + + typeNamespacedName := types.NamespacedName{ + Name: resourceName, + Namespace: "default", + } + scalewaymanagedmachinepool := &infrav1.ScalewayManagedMachinePool{} + + BeforeEach(func() { + By("creating the custom resource for the Kind ScalewayManagedMachinePool") + err := k8sClient.Get(ctx, typeNamespacedName, scalewaymanagedmachinepool) + if err != nil && errors.IsNotFound(err) { + resource := &infrav1.ScalewayManagedMachinePool{ + ObjectMeta: metav1.ObjectMeta{ + Name: resourceName, + Namespace: "default", + }, + Spec: infrav1.ScalewayManagedMachinePoolSpec{ + NodeType: "DEV1-S", + Zone: string(scw.ZoneFrPar1), + }, + } + Expect(k8sClient.Create(ctx, resource)).To(Succeed()) + } + }) + + AfterEach(func() { + resource := &infrav1.ScalewayManagedMachinePool{} + err := k8sClient.Get(ctx, typeNamespacedName, resource) + Expect(err).NotTo(HaveOccurred()) + + By("Cleanup the specific resource instance ScalewayManagedMachinePool") + Expect(k8sClient.Delete(ctx, resource)).To(Succeed()) + }) + It("should successfully reconcile the resource", func() { + By("Reconciling the created resource") + controllerReconciler := &ScalewayManagedMachinePoolReconciler{ + Client: k8sClient, + createScalewayManagedMachinePoolService: newScalewayManagedMachinePoolService, + } + + _, err := controllerReconciler.Reconcile(ctx, reconcile.Request{ + NamespacedName: typeNamespacedName, + }) + Expect(err).NotTo(HaveOccurred()) + }) + }) +}) + +var _ = Describe("ScalewayManagedMachinePool", func() { + Context("When updating the resource", func() { + When("Basic machine pool", func() { + const resourceName = "test-resource-1" + ctx := context.Background() + + typeNamespacedName := types.NamespacedName{ + Name: resourceName, + Namespace: "default", + } + scalewaymanagedmachinepool := &infrav1.ScalewayManagedMachinePool{} + + BeforeEach(func() { + By("creating the custom resource for the Kind ScalewayManagedMachinePool") + err := k8sClient.Get(ctx, typeNamespacedName, scalewaymanagedmachinepool) + if err != nil && errors.IsNotFound(err) { + resource := &infrav1.ScalewayManagedMachinePool{ + ObjectMeta: metav1.ObjectMeta{ + Name: resourceName, + Namespace: "default", + }, + Spec: infrav1.ScalewayManagedMachinePoolSpec{ + NodeType: "DEV1-S", + Zone: string(scw.ZoneFrPar1), + }, + } + Expect(k8sClient.Create(ctx, resource)).To(Succeed()) + } + }) + + AfterEach(func() { + resource := &infrav1.ScalewayManagedMachinePool{} + err := k8sClient.Get(ctx, typeNamespacedName, resource) + Expect(err).NotTo(HaveOccurred()) + + By("Cleanup the specific resource instance ScalewayManagedMachinePool") + Expect(k8sClient.Delete(ctx, resource)).To(Succeed()) + }) + + 
It("should fail to update Node Type", func(ctx SpecContext) { + By("Setting Node Type") + resource := &infrav1.ScalewayManagedMachinePool{} + err := k8sClient.Get(ctx, typeNamespacedName, resource) + Expect(err).NotTo(HaveOccurred()) + + resource.Spec.NodeType = "DEV1-M" + Expect(k8sClient.Update(ctx, resource)).NotTo(Succeed()) + }) + + It("should fail to update Zone", func(ctx SpecContext) { + By("Setting Zone") + resource := &infrav1.ScalewayManagedMachinePool{} + err := k8sClient.Get(ctx, typeNamespacedName, resource) + Expect(err).NotTo(HaveOccurred()) + + resource.Spec.Zone = string(scw.ZoneFrPar2) + Expect(k8sClient.Update(ctx, resource)).NotTo(Succeed()) + }) + }) + }) +}) + +var ( + scalewayManagedMachinePoolNamespacedName = types.NamespacedName{ + Namespace: "caps", + Name: "scalewaymanagedmachinepool", + } + machinePoolNamespacedName = types.NamespacedName{ + Namespace: "caps", + Name: "machinepool", + } +) + +func TestScalewayManagedMachinePoolReconciler_Reconcile(t *testing.T) { + t.Parallel() + type fields struct { + createScalewayManagedMachinePoolService scalewayManagedMachinePoolServiceCreator + } + type args struct { + ctx context.Context + req ctrl.Request + } + tests := []struct { + name string + fields fields + args args + want ctrl.Result + wantErr bool + objects []client.Object + asserts func(g *WithT, c client.Client) + }{ + { + name: "should reconcile normally", + fields: fields{ + createScalewayManagedMachinePoolService: func(managedMachinePoolScope *scope.ManagedMachinePool) *scalewayManagedMachinePoolService { + return &scalewayManagedMachinePoolService{ + scope: managedMachinePoolScope, + Reconcile: func(ctx context.Context) error { return nil }, + Delete: func(ctx context.Context) error { return nil }, + } + }, + }, + args: args{ + ctx: context.TODO(), + req: reconcile.Request{NamespacedName: scalewayManagedMachinePoolNamespacedName}, + }, + want: reconcile.Result{}, + objects: []client.Object{ + &expclusterv1.MachinePool{ + ObjectMeta: metav1.ObjectMeta{ + Name: machinePoolNamespacedName.Name, + Namespace: machinePoolNamespacedName.Namespace, + Labels: map[string]string{ + clusterv1.ClusterNameLabel: clusterNamespacedName.Name, + }, + }, + Spec: expclusterv1.MachinePoolSpec{ + ClusterName: clusterNamespacedName.Name, + Template: clusterv1.MachineTemplateSpec{ + Spec: clusterv1.MachineSpec{ + ClusterName: clusterNamespacedName.Name, + InfrastructureRef: corev1.ObjectReference{ + Name: scalewayManagedMachinePoolNamespacedName.Name, + Namespace: scalewayManagedMachinePoolNamespacedName.Namespace, + }, + }, + }, + }, + }, + &infrav1.ScalewayManagedMachinePool{ + ObjectMeta: metav1.ObjectMeta{ + Name: scalewayManagedMachinePoolNamespacedName.Name, + Namespace: scalewayManagedMachinePoolNamespacedName.Namespace, + OwnerReferences: []metav1.OwnerReference{ + { + Name: machinePoolNamespacedName.Name, + Kind: "MachinePool", + APIVersion: expclusterv1.GroupVersion.String(), + }, + }, + }, + Spec: infrav1.ScalewayManagedMachinePoolSpec{ + NodeType: "DEV1-S", + Zone: scw.ZoneFrPar1.String(), + }, + }, + &infrav1.ScalewayManagedCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: scalewayManagedClusterNamespacedName.Name, + Namespace: scalewayManagedClusterNamespacedName.Namespace, + OwnerReferences: []metav1.OwnerReference{ + { + Name: clusterNamespacedName.Name, + Kind: "Cluster", + APIVersion: clusterv1.GroupVersion.String(), + }, + }, + }, + Spec: infrav1.ScalewayManagedClusterSpec{ + Region: "fr-par", + ScalewaySecretName: secretNamespacedName.Name, + ProjectID: 
"11111111-1111-1111-1111-111111111111", + }, + Status: infrav1.ScalewayManagedClusterStatus{ + Ready: true, + }, + }, + &infrav1.ScalewayManagedControlPlane{ + ObjectMeta: metav1.ObjectMeta{ + Name: scalewayManagedControlPlaneNamespacedName.Name, + Namespace: scalewayManagedControlPlaneNamespacedName.Namespace, + OwnerReferences: []metav1.OwnerReference{ + { + Name: clusterNamespacedName.Name, + Kind: "Cluster", + APIVersion: clusterv1.GroupVersion.String(), + }, + }, + }, + Spec: infrav1.ScalewayManagedControlPlaneSpec{ + Type: "kapsule", + Version: "v1.30.0", + ControlPlaneEndpoint: clusterv1.APIEndpoint{ + Host: managedEndpoint.Host, + Port: managedEndpoint.Port, + }, + }, + }, + &clusterv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: clusterNamespacedName.Name, + Namespace: clusterNamespacedName.Namespace, + }, + Spec: clusterv1.ClusterSpec{ + ControlPlaneRef: &corev1.ObjectReference{ + Name: scalewayManagedControlPlaneNamespacedName.Name, + Namespace: scalewayManagedControlPlaneNamespacedName.Namespace, + }, + InfrastructureRef: &corev1.ObjectReference{ + Name: scalewayManagedClusterNamespacedName.Name, + Namespace: scalewayManagedClusterNamespacedName.Namespace, + }, + }, + }, + &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: secretNamespacedName.Name, + Namespace: secretNamespacedName.Namespace, + }, + Data: map[string][]byte{ + scw.ScwAccessKeyEnv: []byte("SCWXXXXXXXXXXXXXXXXX"), + scw.ScwSecretKeyEnv: []byte("11111111-1111-1111-1111-111111111111"), + }, + }, + }, + asserts: func(g *WithT, c client.Client) { + // ScalewayManagedMachinePool checks + smmp := &infrav1.ScalewayManagedMachinePool{} + g.Expect(c.Get(context.TODO(), scalewayManagedMachinePoolNamespacedName, smmp)).To(Succeed()) + g.Expect(smmp.Status.Ready).To(BeTrue()) + g.Expect(smmp.Finalizers).To(ContainElement(infrav1.ManagedMachinePoolFinalizer)) + }, + }, + { + name: "should reconcile deletion", + fields: fields{ + createScalewayManagedMachinePoolService: func(managedMachinePoolScope *scope.ManagedMachinePool) *scalewayManagedMachinePoolService { + return &scalewayManagedMachinePoolService{ + scope: managedMachinePoolScope, + Reconcile: func(ctx context.Context) error { return nil }, + Delete: func(ctx context.Context) error { return nil }, + } + }, + }, + args: args{ + ctx: context.TODO(), + req: reconcile.Request{NamespacedName: scalewayManagedMachinePoolNamespacedName}, + }, + want: reconcile.Result{}, + objects: []client.Object{ + &expclusterv1.MachinePool{ + ObjectMeta: metav1.ObjectMeta{ + Name: machinePoolNamespacedName.Name, + Namespace: machinePoolNamespacedName.Namespace, + Labels: map[string]string{ + clusterv1.ClusterNameLabel: clusterNamespacedName.Name, + }, + }, + Spec: expclusterv1.MachinePoolSpec{ + ClusterName: clusterNamespacedName.Name, + Template: clusterv1.MachineTemplateSpec{ + Spec: clusterv1.MachineSpec{ + ClusterName: clusterNamespacedName.Name, + InfrastructureRef: corev1.ObjectReference{ + Name: scalewayManagedMachinePoolNamespacedName.Name, + Namespace: scalewayManagedMachinePoolNamespacedName.Namespace, + }, + }, + }, + }, + }, + &infrav1.ScalewayManagedMachinePool{ + ObjectMeta: metav1.ObjectMeta{ + Name: scalewayManagedMachinePoolNamespacedName.Name, + Namespace: scalewayManagedMachinePoolNamespacedName.Namespace, + OwnerReferences: []metav1.OwnerReference{ + { + Name: machinePoolNamespacedName.Name, + Kind: "MachinePool", + APIVersion: expclusterv1.GroupVersion.String(), + }, + }, + Finalizers: []string{infrav1.ManagedMachinePoolFinalizer}, + DeletionTimestamp: 
&metav1.Time{Time: time.Now()}, + }, + Spec: infrav1.ScalewayManagedMachinePoolSpec{ + NodeType: "DEV1-S", + Zone: scw.ZoneFrPar1.String(), + }, + }, + &infrav1.ScalewayManagedCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: scalewayManagedClusterNamespacedName.Name, + Namespace: scalewayManagedClusterNamespacedName.Namespace, + OwnerReferences: []metav1.OwnerReference{ + { + Name: clusterNamespacedName.Name, + Kind: "Cluster", + APIVersion: clusterv1.GroupVersion.String(), + }, + }, + }, + Spec: infrav1.ScalewayManagedClusterSpec{ + Region: "fr-par", + ScalewaySecretName: secretNamespacedName.Name, + ProjectID: "11111111-1111-1111-1111-111111111111", + }, + Status: infrav1.ScalewayManagedClusterStatus{ + Ready: true, + }, + }, + &infrav1.ScalewayManagedControlPlane{ + ObjectMeta: metav1.ObjectMeta{ + Name: scalewayManagedControlPlaneNamespacedName.Name, + Namespace: scalewayManagedControlPlaneNamespacedName.Namespace, + OwnerReferences: []metav1.OwnerReference{ + { + Name: clusterNamespacedName.Name, + Kind: "Cluster", + APIVersion: clusterv1.GroupVersion.String(), + }, + }, + }, + Spec: infrav1.ScalewayManagedControlPlaneSpec{ + Type: "kapsule", + Version: "v1.30.0", + ControlPlaneEndpoint: clusterv1.APIEndpoint{ + Host: managedEndpoint.Host, + Port: managedEndpoint.Port, + }, + }, + }, + &clusterv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: clusterNamespacedName.Name, + Namespace: clusterNamespacedName.Namespace, + }, + Spec: clusterv1.ClusterSpec{ + ControlPlaneRef: &corev1.ObjectReference{ + Name: scalewayManagedControlPlaneNamespacedName.Name, + Namespace: scalewayManagedControlPlaneNamespacedName.Namespace, + }, + InfrastructureRef: &corev1.ObjectReference{ + Name: scalewayManagedClusterNamespacedName.Name, + Namespace: scalewayManagedClusterNamespacedName.Namespace, + }, + }, + }, + &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: secretNamespacedName.Name, + Namespace: secretNamespacedName.Namespace, + }, + Data: map[string][]byte{ + scw.ScwAccessKeyEnv: []byte("SCWXXXXXXXXXXXXXXXXX"), + scw.ScwSecretKeyEnv: []byte("11111111-1111-1111-1111-111111111111"), + }, + }, + }, + asserts: func(g *WithT, c client.Client) { + // ScalewayManagedMachinePool should not exist anymore if the finalizer was correctly removed. + smmp := &infrav1.ScalewayManagedMachinePool{} + g.Expect(c.Get(context.TODO(), scalewayManagedMachinePoolNamespacedName, smmp)).NotTo(Succeed()) + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + g := NewWithT(t) + sb := runtime.NewSchemeBuilder( + corev1.AddToScheme, + clusterv1.AddToScheme, + infrav1.AddToScheme, + expclusterv1.AddToScheme, + ) + s := runtime.NewScheme() + + g.Expect(sb.AddToScheme(s)).To(Succeed()) + + runtimeObjects := make([]runtime.Object, 0, len(tt.objects)) + for _, obj := range tt.objects { + runtimeObjects = append(runtimeObjects, obj) + } + + c := fake.NewClientBuilder(). + WithScheme(s). + WithRuntimeObjects(runtimeObjects...). + WithStatusSubresource(tt.objects...). 
+ Build() + + r := &ScalewayManagedMachinePoolReconciler{ + Client: c, + createScalewayManagedMachinePoolService: tt.fields.createScalewayManagedMachinePoolService, + } + got, err := r.Reconcile(tt.args.ctx, tt.args.req) + if (err != nil) != tt.wantErr { + t.Errorf("ScalewayManagedMachinePoolReconciler.Reconcile() error = %v, wantErr %v", err, tt.wantErr) + return + } + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("ScalewayManagedMachinePoolReconciler.Reconcile() = %v, want %v", got, tt.want) + } + tt.asserts(g, c) + }) + } +} diff --git a/internal/controller/scalewaymanagedmachinepool_reconciler.go b/internal/controller/scalewaymanagedmachinepool_reconciler.go new file mode 100644 index 0000000..ee7182b --- /dev/null +++ b/internal/controller/scalewaymanagedmachinepool_reconciler.go @@ -0,0 +1,53 @@ +package controller + +import ( + "context" + "fmt" + + "github.com/scaleway/cluster-api-provider-scaleway/internal/scope" + "github.com/scaleway/cluster-api-provider-scaleway/internal/service/scaleway" + "github.com/scaleway/cluster-api-provider-scaleway/internal/service/scaleway/k8s/pool" +) + +type scalewayManagedMachinePoolService struct { + scope *scope.ManagedMachinePool + // services is the list of services that are reconciled by this controller. + // The order of the services is important as it determines the order in which the services are reconciled. + services []scaleway.ServiceReconciler + Reconcile func(context.Context) error + Delete func(context.Context) error +} + +func newScalewayManagedMachinePoolService(s *scope.ManagedMachinePool) *scalewayManagedMachinePoolService { + svc := &scalewayManagedMachinePoolService{ + scope: s, + services: []scaleway.ServiceReconciler{ + pool.New(s), + }, + } + + svc.Reconcile = svc.reconcile + svc.Delete = svc.delete + + return svc +} + +func (s *scalewayManagedMachinePoolService) reconcile(ctx context.Context) error { + for _, service := range s.services { + if err := service.Reconcile(ctx); err != nil { + return fmt.Errorf("failed to reconcile ScalewayManagedMachinePool service %s: %w", service.Name(), err) + } + } + + return nil +} + +func (s *scalewayManagedMachinePoolService) delete(ctx context.Context) error { + for i := len(s.services) - 1; i >= 0; i-- { + if err := s.services[i].Delete(ctx); err != nil { + return fmt.Errorf("failed to delete ScalewayManagedMachinePool service %s: %w", s.services[i].Name(), err) + } + } + + return nil +} diff --git a/internal/scope/cluster.go b/internal/scope/cluster.go index 038b114..db948bd 100644 --- a/internal/scope/cluster.go +++ b/internal/scope/cluster.go @@ -5,13 +5,11 @@ import ( "errors" "fmt" "slices" - "strings" infrav1 "github.com/scaleway/cluster-api-provider-scaleway/api/v1alpha1" scwClient "github.com/scaleway/cluster-api-provider-scaleway/internal/service/scaleway/client" "github.com/scaleway/scaleway-sdk-go/scw" - corev1 "k8s.io/api/core/v1" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" "sigs.k8s.io/cluster-api/util/patch" "sigs.k8s.io/controller-runtime/pkg/client" @@ -39,22 +37,9 @@ type ClusterParams struct { // NewCluster creates a new Cluster scope. 
func NewCluster(ctx context.Context, params *ClusterParams) (*Cluster, error) { - region, err := scw.ParseRegion(params.ScalewayCluster.Spec.Region) + c, err := newScalewayClientForScalewayCluster(ctx, params.Client, params.ScalewayCluster) if err != nil { - return nil, fmt.Errorf("unable to parse region %q: %w", params.ScalewayCluster.Spec.Region, err) - } - - secret := &corev1.Secret{} - if err := params.Client.Get(ctx, client.ObjectKey{ - Name: params.ScalewayCluster.Spec.ScalewaySecretName, - Namespace: params.ScalewayCluster.Namespace, - }, secret); err != nil { - return nil, fmt.Errorf("failed to get ScalewaySecret: %w", err) - } - - c, err := scwClient.New(region, params.ScalewayCluster.Spec.ProjectID, secret.Data) - if err != nil { - return nil, fmt.Errorf("failed to create Scaleway client from ScalewaySecret: %w", err) + return nil, err } helper, err := patch.NewHelper(params.ScalewayCluster, params.Client) @@ -80,10 +65,10 @@ func (c *Cluster) Close(ctx context.Context) error { return c.PatchObject(ctx) } -// ResourceNameName returns the name/prefix that resources created for the cluster should have. +// ResourceName returns the name/prefix that resources created for the cluster should have. // It is possible to provide additional suffixes that will be appended to the name with a leading "-". func (c *Cluster) ResourceName(suffixes ...string) string { - return strings.Join(append([]string{c.ScalewayCluster.Name}, suffixes...), "-") + return nameWithSuffixes(c.ScalewayCluster.Name, suffixes...) } // ResourceTags returns the tags that resources created for the cluster should have. @@ -96,18 +81,29 @@ func (c *Cluster) ResourceTags(additional ...string) []string { }, additional...) } +// SetCloud sets the Scaleway client object. +func (c *Cluster) SetCloud(sc scwClient.Interface) { + c.ScalewayClient = sc +} + +// Cloud returns the initialized Scaleway client object. +func (c *Cluster) Cloud() scwClient.Interface { + return c.ScalewayClient +} + // HasPrivateNetwork returns true if the cluster has a Private Network. func (c *Cluster) HasPrivateNetwork() bool { return c.ScalewayCluster.Spec.Network != nil && c.ScalewayCluster.Spec.Network.PrivateNetwork.Enabled } -// ShouldManagePrivateNetwork returns true if the provider should manage the -// Private Network of the cluster. -func (c *Cluster) ShouldManagePrivateNetwork() bool { - return c.HasPrivateNetwork() && - c.ScalewayCluster.Spec.Network.PrivateNetwork != nil && - c.ScalewayCluster.Spec.Network.PrivateNetwork.ID == nil +// PrivateNetworkParams returns the private network parameters. +func (c *Cluster) PrivateNetworkParams() infrav1.PrivateNetworkParams { + if c.ScalewayCluster.Spec.Network == nil || c.ScalewayCluster.Spec.Network.PrivateNetwork == nil { + return infrav1.PrivateNetworkParams{} + } + + return c.ScalewayCluster.Spec.Network.PrivateNetwork.PrivateNetworkParams } // PrivateNetworkID returns the PrivateNetwork ID of the cluster, obtained from @@ -260,27 +256,21 @@ func (c *Cluster) ControlPlaneLoadBalancerPrivate() bool { *c.ScalewayCluster.Spec.Network.ControlPlaneLoadBalancer.Private } -// SetStatusPrivateNetworkID sets the Private Network ID in the status of the -// ScalewayCluster object. 
-func (c *Cluster) SetStatusPrivateNetworkID(pnID string) { - if c.ScalewayCluster.Status.Network == nil { - c.ScalewayCluster.Status.Network = &infrav1.NetworkStatus{ - PrivateNetworkID: &pnID, - } - } else { - c.ScalewayCluster.Status.Network.PrivateNetworkID = &pnID - } +// IsVPCStatusSet if the VPC fields are set in the status. +func (c *Cluster) IsVPCStatusSet() bool { + return c.ScalewayCluster.Status.Network != nil && + c.ScalewayCluster.Status.Network.PrivateNetworkID != nil && + c.ScalewayCluster.Status.Network.VPCID != nil } -// SetStatusVPCID sets the VPC ID in the status of the ScalewayCluster object. -func (c *Cluster) SetStatusVPCID(vpcID string) { +// SetVPCStatus sets the VPC fields in the status. +func (c *Cluster) SetVPCStatus(pnID, vpcID string) { if c.ScalewayCluster.Status.Network == nil { - c.ScalewayCluster.Status.Network = &infrav1.NetworkStatus{ - VPCID: &vpcID, - } - } else { - c.ScalewayCluster.Status.Network.VPCID = &vpcID + c.ScalewayCluster.Status.Network = &infrav1.NetworkStatus{} } + + c.ScalewayCluster.Status.Network.PrivateNetworkID = &pnID + c.ScalewayCluster.Status.Network.VPCID = &vpcID } // SetStatusLoadBalancerIP sets the loadbalancer IP in the status. @@ -315,3 +305,12 @@ func (c *Cluster) SetFailureDomains(zones []scw.Zone) { } } } + +// PublicGateways returns the desired Public Gateways. +func (c *Cluster) PublicGateways() []infrav1.PublicGatewaySpec { + if c.ScalewayCluster.Spec.Network == nil { + return nil + } + + return c.ScalewayCluster.Spec.Network.PublicGateways +} diff --git a/internal/scope/cluster_test.go b/internal/scope/cluster_test.go index 24e25af..340dd96 100644 --- a/internal/scope/cluster_test.go +++ b/internal/scope/cluster_test.go @@ -177,68 +177,6 @@ func TestCluster_HasPrivateNetwork(t *testing.T) { } } -func TestCluster_ShouldManagePrivateNetwork(t *testing.T) { - t.Parallel() - type fields struct { - ScalewayCluster *infrav1.ScalewayCluster - } - tests := []struct { - name string - fields fields - want bool - }{ - { - name: "empty spec", - fields: fields{ - ScalewayCluster: &infrav1.ScalewayCluster{}, - }, - want: false, - }, - { - name: "existing private network", - fields: fields{ - ScalewayCluster: &infrav1.ScalewayCluster{ - Spec: infrav1.ScalewayClusterSpec{ - Network: &infrav1.NetworkSpec{ - PrivateNetwork: &infrav1.PrivateNetworkSpec{ - Enabled: true, - ID: scw.StringPtr(privateNetworkID), - }, - }, - }, - }, - }, - want: false, - }, - { - name: "managed", - fields: fields{ - ScalewayCluster: &infrav1.ScalewayCluster{ - Spec: infrav1.ScalewayClusterSpec{ - Network: &infrav1.NetworkSpec{ - PrivateNetwork: &infrav1.PrivateNetworkSpec{ - Enabled: true, - }, - }, - }, - }, - }, - want: true, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - t.Parallel() - c := &Cluster{ - ScalewayCluster: tt.fields.ScalewayCluster, - } - if got := c.ShouldManagePrivateNetwork(); got != tt.want { - t.Errorf("Cluster.ShouldManagePrivateNetwork() = %v, want %v", got, tt.want) - } - }) - } -} - func TestCluster_PrivateNetworkID(t *testing.T) { type fields struct { ScalewayCluster *infrav1.ScalewayCluster @@ -643,3 +581,65 @@ func TestCluster_ControlPlaneHost(t *testing.T) { }) } } + +func TestCluster_IsVPCStatusSet(t *testing.T) { + t.Parallel() + type fields struct { + patchHelper *patch.Helper + Cluster *clusterv1.Cluster + ScalewayCluster *infrav1.ScalewayCluster + ScalewayClient scwClient.Interface + } + tests := []struct { + name string + fields fields + want bool + }{ + { + name: "network status not set", + 
fields: fields{ + ScalewayCluster: &infrav1.ScalewayCluster{}, + }, + want: false, + }, + { + name: "network status set but not VPC", + fields: fields{ + ScalewayCluster: &infrav1.ScalewayCluster{ + Status: infrav1.ScalewayClusterStatus{ + Network: &infrav1.NetworkStatus{}, + }, + }, + }, + want: false, + }, + { + name: "vpc status set", + fields: fields{ + ScalewayCluster: &infrav1.ScalewayCluster{ + Status: infrav1.ScalewayClusterStatus{ + Network: &infrav1.NetworkStatus{ + VPCID: scw.StringPtr(vpcID), + PrivateNetworkID: scw.StringPtr(privateNetworkID), + }, + }, + }, + }, + want: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + c := &Cluster{ + patchHelper: tt.fields.patchHelper, + Cluster: tt.fields.Cluster, + ScalewayCluster: tt.fields.ScalewayCluster, + ScalewayClient: tt.fields.ScalewayClient, + } + if got := c.IsVPCStatusSet(); got != tt.want { + t.Errorf("Cluster.IsVPCStatusSet() = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/internal/scope/helpers.go b/internal/scope/helpers.go new file mode 100644 index 0000000..df9d4ac --- /dev/null +++ b/internal/scope/helpers.go @@ -0,0 +1,83 @@ +package scope + +import ( + "context" + "fmt" + "strings" + + infrav1 "github.com/scaleway/cluster-api-provider-scaleway/api/v1alpha1" + scwClient "github.com/scaleway/cluster-api-provider-scaleway/internal/service/scaleway/client" + "github.com/scaleway/scaleway-sdk-go/scw" + "golang.org/x/crypto/blake2b" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +const base36set = "0123456789abcdefghijklmnopqrstuvwxyz" + +func nameWithSuffixes(name string, suffixes ...string) string { + return strings.Join(append([]string{name}, suffixes...), "-") +} + +func newScalewayClient(ctx context.Context, c client.Client, region, projectID string, secretRef client.ObjectKey) (*scwClient.Client, error) { + r, err := scw.ParseRegion(region) + if err != nil { + return nil, fmt.Errorf("unable to parse region %q: %w", r, err) + } + + secret := &corev1.Secret{} + if err := c.Get(ctx, secretRef, secret); err != nil { + return nil, fmt.Errorf("failed to get ScalewaySecret: %w", err) + } + + sc, err := scwClient.New(r, projectID, secret.Data) + if err != nil { + return nil, fmt.Errorf("failed to create Scaleway client from ScalewaySecret: %w", err) + } + + return sc, nil +} + +func newScalewayClientForScalewayCluster(ctx context.Context, c client.Client, sc *infrav1.ScalewayCluster) (*scwClient.Client, error) { + return newScalewayClient(ctx, c, sc.Spec.Region, sc.Spec.ProjectID, types.NamespacedName{ + Namespace: sc.Namespace, + Name: sc.Spec.ScalewaySecretName, + }) +} + +func newScalewayClientForScalewayManagedCluster(ctx context.Context, c client.Client, smc *infrav1.ScalewayManagedCluster) (*scwClient.Client, error) { + return newScalewayClient(ctx, c, smc.Spec.Region, smc.Spec.ProjectID, types.NamespacedName{ + Namespace: smc.Namespace, + Name: smc.Spec.ScalewaySecretName, + }) +} + +// base36TruncatedHash returns a consistent hash using blake2b +// and truncating the byte values to alphanumeric only +// of a fixed length specified by the consumer. 
+func base36TruncatedHash(str string, hashLen int) (string, error) { + hasher, err := blake2b.New(hashLen, nil) + if err != nil { + return "", fmt.Errorf("unable to create hash function: %w", err) + } + + if _, err := hasher.Write([]byte(str)); err != nil { + return "", fmt.Errorf("unable to write hash: %w", err) + } + + return base36Truncate(hasher.Sum(nil)), nil +} + +// base36Truncate returns a string that is base36 compliant +// It is not an encoding since it returns a same-length string +// for any byte value. +func base36Truncate(bytes []byte) string { + var chars string + for _, bite := range bytes { + idx := int(bite) % 36 + chars += string(base36set[idx]) + } + + return chars +} diff --git a/internal/scope/helpers_test.go b/internal/scope/helpers_test.go new file mode 100644 index 0000000..fdd6879 --- /dev/null +++ b/internal/scope/helpers_test.go @@ -0,0 +1,39 @@ +package scope + +import "testing" + +func Test_base36TruncatedHash(t *testing.T) { + t.Parallel() + type args struct { + str string + hashLen int + } + tests := []struct { + name string + args args + want string + wantErr bool + }{ + { + name: "hash value", + args: args{ + str: "test", + hashLen: 16, + }, + want: "wo9lxbufgjnn34bj", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + got, err := base36TruncatedHash(tt.args.str, tt.args.hashLen) + if (err != nil) != tt.wantErr { + t.Errorf("base36TruncatedHash() error = %v, wantErr %v", err, tt.wantErr) + return + } + if got != tt.want { + t.Errorf("base36TruncatedHash() = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/internal/scope/managedcluster.go b/internal/scope/managedcluster.go new file mode 100644 index 0000000..3b3a15b --- /dev/null +++ b/internal/scope/managedcluster.go @@ -0,0 +1,143 @@ +package scope + +import ( + "context" + "errors" + "fmt" + "strings" + + infrav1 "github.com/scaleway/cluster-api-provider-scaleway/api/v1alpha1" + scwClient "github.com/scaleway/cluster-api-provider-scaleway/internal/service/scaleway/client" + "sigs.k8s.io/cluster-api/util/patch" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +type ManagedCluster struct { + patchHelper *patch.Helper + + ManagedCluster *infrav1.ScalewayManagedCluster + ManagedControlPlane *infrav1.ScalewayManagedControlPlane // ManagedControlPlane may be nil, on Cluster deletion. + ScalewayClient scwClient.Interface +} + +// ClusterParams contains mandatory params for creating the Cluster scope. +type ManagedClusterParams struct { + Client client.Client + ManagedCluster *infrav1.ScalewayManagedCluster + ManagedControlPlane *infrav1.ScalewayManagedControlPlane +} + +// NewManagedCluster creates a new Cluster scope. +func NewManagedCluster(ctx context.Context, params *ManagedClusterParams) (*ManagedCluster, error) { + c, err := newScalewayClientForScalewayManagedCluster(ctx, params.Client, params.ManagedCluster) + if err != nil { + return nil, err + } + + helper, err := patch.NewHelper(params.ManagedCluster, params.Client) + if err != nil { + return nil, fmt.Errorf("failed to create patch helper for ScalewayCluster: %w", err) + } + + return &ManagedCluster{ + patchHelper: helper, + ScalewayClient: c, + ManagedCluster: params.ManagedCluster, + ManagedControlPlane: params.ManagedControlPlane, + }, nil +} + +// PatchObject patches the ScalewayManagedCluster object. 
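+//
+// Reconcilers would typically call this (directly or via Close) in a deferred statement
+// so that status changes made during reconciliation are persisted. Illustrative sketch
+// only; the scope variable and logger names are hypothetical:
+//
+//	defer func() {
+//		if err := managedClusterScope.Close(ctx); err != nil {
+//			log.Error(err, "failed to patch ScalewayManagedCluster")
+//		}
+//	}()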
+func (m *ManagedCluster) PatchObject(ctx context.Context) error { + return m.patchHelper.Patch(ctx, m.ManagedCluster) +} + +// Close closes the Machine scope by patching the ScalewayManagedCluster object. +func (m *ManagedCluster) Close(ctx context.Context) error { + return m.PatchObject(ctx) +} + +// ResourceName returns the name/prefix that resources created for the cluster should have. +// It is possible to provide additional suffixes that will be appended to the name with a leading "-". +func (c *ManagedCluster) ResourceName(suffixes ...string) string { + return nameWithSuffixes(c.ManagedCluster.Name, suffixes...) +} + +// ResourceTags returns the tags that resources created for the cluster should have. +// It is possible to provide additional tags that will be added to the default tags. +func (c *ManagedCluster) ResourceTags(additional ...string) []string { + return append( + []string{ + fmt.Sprintf("caps-namespace=%s", c.ManagedCluster.Namespace), + fmt.Sprintf("caps-scalewaymanagedcluster=%s", c.ManagedCluster.Name), + }, additional...) +} + +// SetCloud sets the Scaleway client object. +func (c *ManagedCluster) SetCloud(sc scwClient.Interface) { + c.ScalewayClient = sc +} + +// Cloud returns the initialized Scaleway client object. +func (c *ManagedCluster) Cloud() scwClient.Interface { + return c.ScalewayClient +} + +// HasPrivateNetwork returns true if the cluster should have a Private Network. +// It's only false if the multicloud cluster type is used. +func (c *ManagedCluster) HasPrivateNetwork() bool { + // On Cluster deletion, we no longer have the info, we have to return true + // to force private network cleanup. + if c.ManagedControlPlane == nil { + return true + } + + return !strings.HasPrefix(c.ManagedControlPlane.Spec.Type, "multicloud") +} + +// IsVPCStatusSet if the VPC fields are set in the status. +func (c *ManagedCluster) IsVPCStatusSet() bool { + return c.ManagedCluster.Status.Network != nil && + c.ManagedCluster.Status.Network.PrivateNetworkID != nil +} + +// PrivateNetworkParams returns the private network parameters. +func (c *ManagedCluster) PrivateNetworkParams() infrav1.PrivateNetworkParams { + if c.ManagedCluster.Spec.Network == nil || c.ManagedCluster.Spec.Network.PrivateNetwork == nil { + return infrav1.PrivateNetworkParams{} + } + + return *c.ManagedCluster.Spec.Network.PrivateNetwork +} + +// SetVPCStatus sets the VPC fields in the status. +func (c *ManagedCluster) SetVPCStatus(pnID, _ string) { + if c.ManagedCluster.Status.Network == nil { + c.ManagedCluster.Status.Network = &infrav1.ManagedNetworkStatus{} + } + + c.ManagedCluster.Status.Network.PrivateNetworkID = &pnID +} + +// PrivateNetworkID returns the PrivateNetwork ID of the managed cluster, obtained from +// the status of the ScalewayManagedCluster resource. +func (c *ManagedCluster) PrivateNetworkID() (string, error) { + if !c.HasPrivateNetwork() { + return "", errors.New("cluster has no Private Network") + } + + if c.ManagedCluster.Status.Network == nil || c.ManagedCluster.Status.Network.PrivateNetworkID == nil { + return "", errors.New("PrivateNetworkID not found in ScalewayManagedCluster status") + } + + return *c.ManagedCluster.Status.Network.PrivateNetworkID, nil +} + +// PublicGateways returns the desired Public Gateways. 
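+// A nil slice is returned when spec.network is not set, which callers can treat as
+// "no Public Gateways desired".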
+func (c *ManagedCluster) PublicGateways() []infrav1.PublicGatewaySpec { + if c.ManagedCluster.Spec.Network == nil { + return nil + } + + return c.ManagedCluster.Spec.Network.PublicGateways +} diff --git a/internal/scope/managedcluster_test.go b/internal/scope/managedcluster_test.go new file mode 100644 index 0000000..d19ecf7 --- /dev/null +++ b/internal/scope/managedcluster_test.go @@ -0,0 +1,77 @@ +package scope + +import ( + "testing" + + infrav1 "github.com/scaleway/cluster-api-provider-scaleway/api/v1alpha1" + scwClient "github.com/scaleway/cluster-api-provider-scaleway/internal/service/scaleway/client" + "sigs.k8s.io/cluster-api/util/patch" +) + +func TestManagedCluster_HasPrivateNetwork(t *testing.T) { + type fields struct { + patchHelper *patch.Helper + ManagedCluster *infrav1.ScalewayManagedCluster + ManagedControlPlane *infrav1.ScalewayManagedControlPlane + ScalewayClient scwClient.Interface + } + tests := []struct { + name string + fields fields + want bool + }{ + { + name: "multicloud has no private network", + fields: fields{ + ManagedControlPlane: &infrav1.ScalewayManagedControlPlane{ + Spec: infrav1.ScalewayManagedControlPlaneSpec{ + Type: "multicloud", + }, + }, + }, + want: false, + }, + { + name: "multicloud-dedicated-4 has no private network", + fields: fields{ + ManagedControlPlane: &infrav1.ScalewayManagedControlPlane{ + Spec: infrav1.ScalewayManagedControlPlaneSpec{ + Type: "multicloud-dedicated-4", + }, + }, + }, + want: false, + }, + { + name: "kapsule has private network", + fields: fields{ + ManagedControlPlane: &infrav1.ScalewayManagedControlPlane{ + Spec: infrav1.ScalewayManagedControlPlaneSpec{ + Type: "kapsule", + }, + }, + }, + want: true, + }, + { + name: "assume a private network if ManagedControlPlane is nil", + fields: fields{ + ManagedControlPlane: nil, + }, + want: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + c := &ManagedCluster{ + patchHelper: tt.fields.patchHelper, + ManagedCluster: tt.fields.ManagedCluster, + ManagedControlPlane: tt.fields.ManagedControlPlane, + ScalewayClient: tt.fields.ScalewayClient, + } + if got := c.HasPrivateNetwork(); got != tt.want { + t.Errorf("ManagedCluster.HasPrivateNetwork() = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/internal/scope/managedcontrolplane.go b/internal/scope/managedcontrolplane.go new file mode 100644 index 0000000..7e01e36 --- /dev/null +++ b/internal/scope/managedcontrolplane.go @@ -0,0 +1,338 @@ +package scope + +import ( + "context" + "fmt" + "strconv" + "strings" + + infrav1 "github.com/scaleway/cluster-api-provider-scaleway/api/v1alpha1" + scwClient "github.com/scaleway/cluster-api-provider-scaleway/internal/service/scaleway/client" + "github.com/scaleway/scaleway-sdk-go/api/k8s/v1" + "github.com/scaleway/scaleway-sdk-go/scw" + clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + "sigs.k8s.io/cluster-api/util/patch" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +const ( + maxClusterNameLength = 100 + resourcePrefix = "caps-" +) + +type ManagedControlPlane struct { + patchHelper *patch.Helper + + Client client.Client + Cluster *clusterv1.Cluster + ManagedCluster *infrav1.ScalewayManagedCluster + ManagedControlPlane *infrav1.ScalewayManagedControlPlane + ScalewayClient scwClient.Interface +} + +// ClusterParams contains mandatory params for creating the Cluster scope. 
+type ManagedControlPlaneParams struct {
+	Client              client.Client
+	Cluster             *clusterv1.Cluster
+	ManagedCluster      *infrav1.ScalewayManagedCluster
+	ManagedControlPlane *infrav1.ScalewayManagedControlPlane
+}
+
+// NewManagedControlPlane creates a new ManagedControlPlane scope.
+func NewManagedControlPlane(ctx context.Context, params *ManagedControlPlaneParams) (*ManagedControlPlane, error) {
+	c, err := newScalewayClientForScalewayManagedCluster(ctx, params.Client, params.ManagedCluster)
+	if err != nil {
+		return nil, err
+	}
+
+	helper, err := patch.NewHelper(params.ManagedControlPlane, params.Client)
+	if err != nil {
+		return nil, fmt.Errorf("failed to create patch helper for ScalewayManagedControlPlane: %w", err)
+	}
+
+	return &ManagedControlPlane{
+		patchHelper:         helper,
+		Client:              params.Client,
+		ScalewayClient:      c,
+		Cluster:             params.Cluster,
+		ManagedCluster:      params.ManagedCluster,
+		ManagedControlPlane: params.ManagedControlPlane,
+	}, nil
+}
+
+// PatchObject patches the ScalewayManagedControlPlane object.
+func (m *ManagedControlPlane) PatchObject(ctx context.Context) error {
+	return m.patchHelper.Patch(ctx, m.ManagedControlPlane)
+}
+
+// Close closes the ManagedControlPlane scope by patching the ScalewayManagedControlPlane object.
+func (m *ManagedControlPlane) Close(ctx context.Context) error {
+	return m.PatchObject(ctx)
+}
+
+// ResourceTags returns the tags that resources created for the control-plane should have.
+// It is possible to provide additional tags that will be added to the default tags.
+func (c *ManagedControlPlane) ResourceTags(additional ...string) []string {
+	return append(
+		[]string{
+			fmt.Sprintf("caps-namespace=%s", c.ManagedControlPlane.Namespace),
+			fmt.Sprintf("caps-scalewaymanagedcontrolplane=%s", c.ManagedControlPlane.Name),
+		}, additional...)
+}
+
+// PrivateNetworkID returns the Private Network ID that should be used when creating
+// the managed cluster. It's nil if no Private Network ID is needed.
+func (m *ManagedControlPlane) PrivateNetworkID() *string {
+	if m.ManagedCluster.Status.Network == nil {
+		return nil
+	}
+
+	return m.ManagedCluster.Status.Network.PrivateNetworkID
+}
+
+// DeleteWithAdditionalResources returns true if we should tell the Scaleway k8s API
+// to delete additional resources when the cluster is deleted.
+func (m *ManagedControlPlane) DeleteWithAdditionalResources() bool {
+	if m.ManagedControlPlane.Spec.OnDelete == nil || m.ManagedControlPlane.Spec.OnDelete.WithAdditionalResources == nil {
+		return false
+	}
+
+	return *m.ManagedControlPlane.Spec.OnDelete.WithAdditionalResources
+}
+
+// SetControlPlaneEndpoint sets the control plane endpoint host and port.
+func (m *ManagedControlPlane) SetControlPlaneEndpoint(host string, port int32) {
+	m.ManagedControlPlane.Spec.ControlPlaneEndpoint.Host = host
+	m.ManagedControlPlane.Spec.ControlPlaneEndpoint.Port = port
+}
+
+// SetStatusVersion sets the current cluster Kubernetes version in the status.
+func (m *ManagedControlPlane) SetStatusVersion(version string) {
+	m.ManagedControlPlane.Status.Version = scw.StringPtr("v" + version)
+}
+
+// DesiredVersion returns the desired Kubernetes version, without the leading "v".
+func (m *ManagedControlPlane) DesiredVersion() string {
+	version, _ := strings.CutPrefix(m.ManagedControlPlane.Spec.Version, "v")
+	return version
+}
+
+// FixedVersion returns the desired Kubernetes version, with a leading "v" if it's missing.
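+// For example, both "1.30.4" and "v1.30.4" result in "v1.30.4".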
+func (m *ManagedControlPlane) FixedVersion() string { + if !strings.HasPrefix(m.ManagedControlPlane.Spec.Version, "v") { + return "v" + m.ManagedControlPlane.Spec.Version + } + + return m.ManagedControlPlane.Spec.Version +} + +func (m *ManagedControlPlane) DesiredTags() []string { + return m.ResourceTags(m.ManagedControlPlane.Spec.AdditionalTags...) +} + +func (m *ManagedControlPlane) ClusterName() string { + if m.ManagedControlPlane.Spec.ClusterName == nil { + name, err := generateScalewayK8sName(m.ManagedControlPlane.Name, m.ManagedControlPlane.Namespace, maxClusterNameLength) + if err != nil { + panic(err) + } + + m.ManagedControlPlane.Spec.ClusterName = &name + } + + return *m.ManagedControlPlane.Spec.ClusterName +} + +func (m *ManagedControlPlane) DesiredCNI() k8s.CNI { + var cni k8s.CNI + if m.ManagedControlPlane.Spec.CNI != nil { + cni = k8s.CNI(*m.ManagedControlPlane.Spec.CNI) + } + + return cni +} + +func (m *ManagedControlPlane) DesiredType() string { + return m.ManagedControlPlane.Spec.Type +} + +func (m *ManagedControlPlane) DesiredClusterAutoscalerConfig() (*k8s.ClusterAutoscalerConfig, error) { + config := &k8s.ClusterAutoscalerConfig{ + ScaleDownDisabled: false, + ScaleDownDelayAfterAdd: "10m", + Estimator: k8s.AutoscalerEstimatorBinpacking, + Expander: k8s.AutoscalerExpanderRandom, + IgnoreDaemonsetsUtilization: false, + BalanceSimilarNodeGroups: false, + ExpendablePodsPriorityCutoff: -10, + ScaleDownUnneededTime: "10m", + ScaleDownUtilizationThreshold: 0.5, + MaxGracefulTerminationSec: 600, + } + + autoscaler := m.ManagedControlPlane.Spec.Autoscaler + if autoscaler == nil { + return config, nil + } + + if autoscaler.ScaleDownDisabled != nil { + config.ScaleDownDisabled = *autoscaler.ScaleDownDisabled + } + + if autoscaler.ScaleDownDelayAfterAdd != nil { + config.ScaleDownDelayAfterAdd = *autoscaler.ScaleDownDelayAfterAdd + } + + if autoscaler.Estimator != nil && *autoscaler.Estimator != k8s.AutoscalerEstimatorUnknownEstimator.String() { + config.Estimator = k8s.AutoscalerEstimator(*autoscaler.Estimator) + } + + if autoscaler.Expander != nil && *autoscaler.Expander != k8s.AutoscalerEstimatorUnknownEstimator.String() { + config.Expander = k8s.AutoscalerExpander(*autoscaler.Expander) + } + + if autoscaler.IgnoreDaemonsetsUtilization != nil { + config.IgnoreDaemonsetsUtilization = *autoscaler.IgnoreDaemonsetsUtilization + } + + if autoscaler.BalanceSimilarNodeGroups != nil { + config.BalanceSimilarNodeGroups = *autoscaler.BalanceSimilarNodeGroups + } + + if autoscaler.ExpendablePodsPriorityCutoff != nil { + config.ExpendablePodsPriorityCutoff = *autoscaler.ExpendablePodsPriorityCutoff + } + + if autoscaler.ScaleDownUnneededTime != nil { + config.ScaleDownUnneededTime = *autoscaler.ScaleDownUnneededTime + } + + if autoscaler.ScaleDownUtilizationThreshold != nil { + value, err := strconv.ParseFloat(*autoscaler.ScaleDownUtilizationThreshold, 32) + if err != nil { + return nil, fmt.Errorf("failed to parse scaleDownUtilizationThreshold as float32: %w", err) + } + + config.ScaleDownUtilizationThreshold = float32(value) + } + + if autoscaler.MaxGracefulTerminationSec != nil { + config.MaxGracefulTerminationSec = uint32(*autoscaler.MaxGracefulTerminationSec) + } + + return config, nil +} + +func (m *ManagedControlPlane) DesiredAutoUpgrade() *k8s.ClusterAutoUpgrade { + config := &k8s.ClusterAutoUpgrade{ + Enabled: false, + MaintenanceWindow: &k8s.MaintenanceWindow{ + StartHour: 0, + Day: k8s.MaintenanceWindowDayOfTheWeekAny, + }, + } + + autoUpgrade := 
m.ManagedControlPlane.Spec.AutoUpgrade
+	if autoUpgrade == nil {
+		return config
+	}
+
+	config.Enabled = autoUpgrade.Enabled
+
+	if autoUpgrade.MaintenanceWindow != nil {
+		config.MaintenanceWindow = &k8s.MaintenanceWindow{}
+
+		if autoUpgrade.MaintenanceWindow.StartHour != nil {
+			config.MaintenanceWindow.StartHour = *scw.Uint32Ptr(uint32(*autoUpgrade.MaintenanceWindow.StartHour))
+		}
+
+		if autoUpgrade.MaintenanceWindow.Day != nil {
+			config.MaintenanceWindow.Day = k8s.MaintenanceWindowDayOfTheWeek(*autoUpgrade.MaintenanceWindow.Day)
+		}
+	}
+
+	return config
+}
+
+func (m *ManagedControlPlane) DesiredClusterOpenIDConnectConfig() *k8s.ClusterOpenIDConnectConfig {
+	config := &k8s.ClusterOpenIDConnectConfig{
+		GroupsClaim:   []string{},
+		RequiredClaim: []string{},
+	}
+
+	oidc := m.ManagedControlPlane.Spec.OpenIDConnect
+	if oidc == nil {
+		return config
+	}
+
+	config.IssuerURL = oidc.IssuerURL
+	config.ClientID = oidc.ClientID
+
+	if oidc.GroupsClaim != nil {
+		config.GroupsClaim = oidc.GroupsClaim
+	}
+
+	if oidc.RequiredClaim != nil {
+		config.RequiredClaim = oidc.RequiredClaim
+	}
+
+	if oidc.UsernameClaim != nil {
+		config.UsernameClaim = *oidc.UsernameClaim
+	}
+
+	if oidc.UsernamePrefix != nil {
+		config.UsernamePrefix = *oidc.UsernamePrefix
+	}
+
+	if oidc.GroupsPrefix != nil {
+		config.GroupsPrefix = *oidc.GroupsPrefix
+	}
+
+	return config
+}
+
+func (m *ManagedControlPlane) DesiredAllowedRanges() []string {
+	// If ACL is not configured, we want all ranges to be allowed.
+	if m.ManagedControlPlane.Spec.ACL == nil {
+		return []string{"0.0.0.0/0"}
+	}
+
+	ranges := make([]string, 0, len(m.ManagedControlPlane.Spec.ACL.AllowedRanges))
+
+	for _, r := range m.ManagedControlPlane.Spec.ACL.AllowedRanges {
+		ranges = append(ranges, string(r))
+	}
+
+	return ranges
+}
+
+func (m *ManagedControlPlane) ClusterEndpoint(cluster *k8s.Cluster) string {
+	if m.ManagedControlPlane.Spec.EnablePrivateEndpoint != nil &&
+		*m.ManagedControlPlane.Spec.EnablePrivateEndpoint &&
+		cluster.PrivateNetworkID != nil {
+		return fmt.Sprintf("https://%s.%s.internal:6443", cluster.ID, *cluster.PrivateNetworkID)
+	}
+
+	return cluster.ClusterURL
+}
+
+func generateScalewayK8sName(resourceName, namespace string, maxLength int) (string, error) {
+	escapedName := strings.ReplaceAll(resourceName, ".", "-")
+	name := fmt.Sprintf("%s-%s", namespace, escapedName)
+
+	if len(name) < maxLength {
+		return name, nil
+	}
+
+	hashLength := 64 - len(resourcePrefix)
+	hashedName, err := base36TruncatedHash(name, hashLength)
+	if err != nil {
+		return "", fmt.Errorf("creating hash from name: %w", err)
+	}
+
+	return fmt.Sprintf("%s%s", resourcePrefix, hashedName), nil
+}
diff --git a/internal/scope/managedcontrolplane_test.go b/internal/scope/managedcontrolplane_test.go
new file mode 100644
index 0000000..f008d9e
--- /dev/null
+++ b/internal/scope/managedcontrolplane_test.go
@@ -0,0 +1,120 @@
+package scope
+
+import (
+	"testing"
+
+	infrav1 "github.com/scaleway/cluster-api-provider-scaleway/api/v1alpha1"
+	"github.com/scaleway/scaleway-sdk-go/scw"
+	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+func Test_generateScalewayK8sName(t *testing.T) {
+	type args struct {
+		resourceName string
+		namespace    string
+		maxLength    int
+	}
+	tests := []struct {
+		name    string
+		args    args
+		want    string
+		wantErr bool
+	}{
+		{
+			name: "escaped name",
+			args: args{
+				resourceName: "test.cluster",
+				namespace:    "default",
+				maxLength:    maxClusterNameLength,
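+				// The joined name "default-test-cluster" stays well under maxClusterNameLength (100),
+				// so it is returned unhashed, with "." escaped to "-".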
+ }, + want: "default-test-cluster", + }, + { + name: "hashed name", + args: args{ + resourceName: "test-cluster-test-cluster-test-cluster-test-cluster-test-cluster-test-cluster-test-cluster-test-cluster", + namespace: "default", + maxLength: maxClusterNameLength, + }, + want: "caps-hma5vzr1gzj7q6045d1dei138m14jwosabmbtcabyqt6kr33qfj9hs2nj3u", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := generateScalewayK8sName(tt.args.resourceName, tt.args.namespace, tt.args.maxLength) + if (err != nil) != tt.wantErr { + t.Errorf("generateScalewayK8sName() error = %v, wantErr %v", err, tt.wantErr) + return + } + if got != tt.want { + t.Errorf("generateScalewayK8sName() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestManagedControlPlane_ClusterName(t *testing.T) { + type fields struct { + ManagedCluster *infrav1.ScalewayManagedCluster + ManagedControlPlane *infrav1.ScalewayManagedControlPlane + } + tests := []struct { + name string + fields fields + want string + }{ + { + name: "name already present", + fields: fields{ + ManagedCluster: &infrav1.ScalewayManagedCluster{ + ObjectMeta: v1.ObjectMeta{ + Name: "cluster", + Namespace: "default", + }, + }, + ManagedControlPlane: &infrav1.ScalewayManagedControlPlane{ + ObjectMeta: v1.ObjectMeta{ + Name: "cluster", + Namespace: "default", + }, + Spec: infrav1.ScalewayManagedControlPlaneSpec{ + ClusterName: scw.StringPtr("mycluster"), + }, + }, + }, + want: "mycluster", + }, + { + name: "generate name", + fields: fields{ + ManagedCluster: &infrav1.ScalewayManagedCluster{ + ObjectMeta: v1.ObjectMeta{ + Name: "cluster", + Namespace: "default", + }, + }, + ManagedControlPlane: &infrav1.ScalewayManagedControlPlane{ + ObjectMeta: v1.ObjectMeta{ + Name: "cluster", + Namespace: "default", + }, + }, + }, + want: "default-cluster", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + m := &ManagedControlPlane{ + ManagedCluster: tt.fields.ManagedCluster, + ManagedControlPlane: tt.fields.ManagedControlPlane, + } + if got := m.ClusterName(); got != tt.want { + t.Errorf("ManagedControlPlane.ClusterName() = %v, want %v", got, tt.want) + } + if tt.want != *m.ManagedControlPlane.Spec.ClusterName { + t.Errorf("expected ManagedControlPlane.Spec.ClusterName to equal %v, got %v", tt.want, *m.ManagedControlPlane.Spec.ClusterName) + } + }) + } +} diff --git a/internal/scope/managedmachinepool.go b/internal/scope/managedmachinepool.go new file mode 100644 index 0000000..42ecb17 --- /dev/null +++ b/internal/scope/managedmachinepool.go @@ -0,0 +1,217 @@ +package scope + +import ( + "context" + "fmt" + "math" + "strings" + + infrav1 "github.com/scaleway/cluster-api-provider-scaleway/api/v1alpha1" + scwClient "github.com/scaleway/cluster-api-provider-scaleway/internal/service/scaleway/client" + "github.com/scaleway/scaleway-sdk-go/api/k8s/v1" + "github.com/scaleway/scaleway-sdk-go/scw" + clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + expclusterv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" + "sigs.k8s.io/cluster-api/util/patch" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +type ManagedMachinePool struct { + patchHelper *patch.Helper + Client client.Client + Cluster *clusterv1.Cluster + MachinePool *expclusterv1.MachinePool + ManagedCluster *infrav1.ScalewayManagedCluster + ManagedControlPlane *infrav1.ScalewayManagedControlPlane + ManagedMachinePool *infrav1.ScalewayManagedMachinePool + ScalewayClient scwClient.Interface +} + +// ClusterParams contains mandatory params for creating the Cluster scope. 
+type ManagedMachinePoolParams struct {
+	Client              client.Client
+	Cluster             *clusterv1.Cluster
+	MachinePool         *expclusterv1.MachinePool
+	ManagedCluster      *infrav1.ScalewayManagedCluster
+	ManagedControlPlane *infrav1.ScalewayManagedControlPlane
+	ManagedMachinePool  *infrav1.ScalewayManagedMachinePool
+}
+
+// NewManagedMachinePool creates a new ManagedMachinePool scope.
+func NewManagedMachinePool(ctx context.Context, params *ManagedMachinePoolParams) (*ManagedMachinePool, error) {
+	c, err := newScalewayClientForScalewayManagedCluster(ctx, params.Client, params.ManagedCluster)
+	if err != nil {
+		return nil, err
+	}
+
+	helper, err := patch.NewHelper(params.ManagedMachinePool, params.Client)
+	if err != nil {
+		return nil, fmt.Errorf("failed to create patch helper for ScalewayManagedMachinePool: %w", err)
+	}
+
+	return &ManagedMachinePool{
+		patchHelper:         helper,
+		Client:              params.Client,
+		ScalewayClient:      c,
+		Cluster:             params.Cluster,
+		MachinePool:         params.MachinePool,
+		ManagedCluster:      params.ManagedCluster,
+		ManagedControlPlane: params.ManagedControlPlane,
+		ManagedMachinePool:  params.ManagedMachinePool,
+	}, nil
+}
+
+// PatchObject patches the ScalewayManagedMachinePool object.
+func (m *ManagedMachinePool) PatchObject(ctx context.Context) error {
+	return m.patchHelper.Patch(ctx, m.ManagedMachinePool)
+}
+
+// Close closes the ManagedMachinePool scope by patching the ScalewayManagedMachinePool object.
+func (m *ManagedMachinePool) Close(ctx context.Context) error {
+	return m.PatchObject(ctx)
+}
+
+// ResourceName returns the name/prefix that resources created for the machine pool should have.
+// It is possible to provide additional suffixes that will be appended to the name with a leading "-".
+func (m *ManagedMachinePool) ResourceName(suffixes ...string) string {
+	return strings.Join(append([]string{m.ManagedMachinePool.Name}, suffixes...), "-")
+}
+
+// ResourceTags returns the tags that resources created for the machine pool should have.
+// It is possible to provide additional tags that will be added to the default tags.
+func (c *ManagedMachinePool) ResourceTags(additional ...string) []string {
+	return append(
+		[]string{
+			fmt.Sprintf("caps-namespace=%s", c.ManagedMachinePool.Namespace),
+			fmt.Sprintf("caps-scalewaymanagedmachinepool=%s", c.ManagedMachinePool.Name),
+		}, additional...)
+}
+
+func (c *ManagedMachinePool) ClusterName() (string, bool) {
+	if c.ManagedControlPlane.Spec.ClusterName == nil {
+		return "", false
+	}
+
+	return *c.ManagedControlPlane.Spec.ClusterName, true
+}
+
+func (c *ManagedMachinePool) Scaling() (autoscaling bool, size, min, max uint32) {
+	// Completely ignore scaling parameters for external node pools.
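+	// For such pools, autoscaling stays false and size, min and max are left at zero.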
+ if c.ManagedMachinePool.Spec.NodeType == "external" { + return + } + + size = c.replicas() + + if c.ManagedMachinePool.Spec.Scaling != nil { + if c.ManagedMachinePool.Spec.Scaling.Autoscaling != nil { + autoscaling = *c.ManagedMachinePool.Spec.Scaling.Autoscaling + } + + if c.ManagedMachinePool.Spec.Scaling.MinSize != nil { + min = uint32(*c.ManagedMachinePool.Spec.Scaling.MinSize) + } + + if c.ManagedMachinePool.Spec.Scaling.MaxSize != nil { + max = uint32(*c.ManagedMachinePool.Spec.Scaling.MaxSize) + } + } + + min = uint32(math.Min(float64(min), float64(size))) + max = uint32(math.Max(float64(max), float64(size))) + + return +} + +func (c *ManagedMachinePool) Autohealing() bool { + if c.ManagedMachinePool.Spec.Autohealing == nil { + return false + } + + return *c.ManagedMachinePool.Spec.Autohealing +} + +func (c *ManagedMachinePool) PublicIPDisabled() bool { + if c.ManagedMachinePool.Spec.PublicIPDisabled == nil { + return false + } + + return *c.ManagedMachinePool.Spec.PublicIPDisabled +} + +func (c *ManagedMachinePool) replicas() uint32 { + if c.MachinePool.Spec.Replicas == nil { + return 3 + } + + return uint32(*c.MachinePool.Spec.Replicas) +} + +func (c *ManagedMachinePool) RootVolumeSizeGB() *uint64 { + if c.ManagedMachinePool.Spec.RootVolumeSizeGB == nil { + return nil + } + + return scw.Uint64Ptr(uint64(*c.ManagedMachinePool.Spec.RootVolumeSizeGB)) +} + +func (c *ManagedMachinePool) SetProviderIDs(nodes []*k8s.Node) { + providerIDs := make([]string, 0, len(nodes)) + + for _, node := range nodes { + if node.ProviderID == "" { + continue + } + + providerIDs = append(providerIDs, node.ProviderID) + } + + c.ManagedMachinePool.Spec.ProviderIDList = providerIDs +} + +func (c *ManagedMachinePool) SetStatusReplicas(replicas uint32) { + c.ManagedMachinePool.Status.Replicas = int32(replicas) +} + +func (c *ManagedMachinePool) RootVolumeType() k8s.PoolVolumeType { + if c.ManagedMachinePool.Spec.RootVolumeType == nil { + return k8s.PoolVolumeTypeDefaultVolumeType + } + + return k8s.PoolVolumeType(*c.ManagedMachinePool.Spec.RootVolumeType) +} + +func (c *ManagedMachinePool) DesiredPoolUpgradePolicy() *k8s.PoolUpgradePolicy { + policy := &k8s.PoolUpgradePolicy{ + MaxSurge: 0, + MaxUnavailable: 1, + } + + if c.ManagedMachinePool.Spec.UpgradePolicy == nil { + return policy + } + + if c.ManagedMachinePool.Spec.UpgradePolicy.MaxSurge != nil { + policy.MaxSurge = uint32(*c.ManagedMachinePool.Spec.UpgradePolicy.MaxSurge) + } + + if c.ManagedMachinePool.Spec.UpgradePolicy.MaxUnavailable != nil { + policy.MaxUnavailable = uint32(*c.ManagedMachinePool.Spec.UpgradePolicy.MaxUnavailable) + } + + return policy +} + +func (m *ManagedMachinePool) DesiredTags() []string { + return m.ResourceTags(m.ManagedMachinePool.Spec.AdditionalTags...) +} + +func (m *ManagedMachinePool) DesiredVersion() *string { + version := m.MachinePool.Spec.Template.Spec.Version + if version == nil { + return nil + } + + *version, _ = strings.CutPrefix(*version, "v") + return version +} diff --git a/internal/scope/scope.go b/internal/scope/scope.go new file mode 100644 index 0000000..9465ee7 --- /dev/null +++ b/internal/scope/scope.go @@ -0,0 +1,10 @@ +package scope + +import "github.com/scaleway/cluster-api-provider-scaleway/internal/service/scaleway/client" + +type Interface interface { + Cloud() client.Interface + SetCloud(client.Interface) // SetCloud is used for testing. 
+ ResourceName(suffixes ...string) string + ResourceTags(additional ...string) []string +} diff --git a/internal/service/scaleway/client/client.go b/internal/service/scaleway/client/client.go index 9772ef9..1fa04f1 100644 --- a/internal/service/scaleway/client/client.go +++ b/internal/service/scaleway/client/client.go @@ -9,6 +9,7 @@ import ( domain "github.com/scaleway/scaleway-sdk-go/api/domain/v2beta1" "github.com/scaleway/scaleway-sdk-go/api/instance/v1" "github.com/scaleway/scaleway-sdk-go/api/ipam/v1" + "github.com/scaleway/scaleway-sdk-go/api/k8s/v1" "github.com/scaleway/scaleway-sdk-go/api/lb/v1" "github.com/scaleway/scaleway-sdk-go/api/marketplace/v2" "github.com/scaleway/scaleway-sdk-go/api/vpc/v2" @@ -32,6 +33,9 @@ type Client struct { projectID string region scw.Region + // Exposed config. + secretKey string + // Product APIs vpc VPCAPI vpcgw VPCGWAPI @@ -41,6 +45,7 @@ type Client struct { block BlockAPI marketplace MarketplaceAPI ipam IPAMAPI + k8s K8sAPI } // New returns a new Scaleway client based on the provided region and secretData. @@ -75,6 +80,7 @@ func New(region scw.Region, projectID string, secretData map[string][]byte) (*Cl return &Client{ projectID: projectID, region: region, + secretKey: secretKey, vpc: vpc.NewAPI(client), vpcgw: vpcgw.NewAPI(client), lb: lb.NewZonedAPI(client), @@ -83,9 +89,16 @@ func New(region scw.Region, projectID string, secretData map[string][]byte) (*Cl block: block.NewAPI(client), marketplace: marketplace.NewAPI(client), ipam: ipam.NewAPI(client), + k8s: k8s.NewAPI(client), }, nil } +// TagsWithoutCreatedBy returns tags on a Scaleway resource, without the "created-by=..." +// tag that is automatically added by the client. +func TagsWithoutCreatedBy(tags []string) []string { + return slices.DeleteFunc(tags, func(s string) bool { return s == createdByTag }) +} + func matchTags(tags []string, wantedTags []string) bool { for _, tag := range wantedTags { if !slices.Contains(tags, tag) { diff --git a/internal/service/scaleway/client/client_test.go b/internal/service/scaleway/client/client_test.go index 2aa804a..a8af65a 100644 --- a/internal/service/scaleway/client/client_test.go +++ b/internal/service/scaleway/client/client_test.go @@ -2,10 +2,17 @@ package client import ( "errors" + "reflect" "testing" + + . 
"github.com/onsi/gomega" + "github.com/scaleway/scaleway-sdk-go/scw" ) -const projectID = "11111111-1111-1111-1111-111111111111" +const ( + projectID = "11111111-1111-1111-1111-111111111111" + secretKey = "11111111-1111-1111-1111-111111111111" +) var errAPI = errors.New("API error") @@ -77,3 +84,109 @@ func Test_matchTags(t *testing.T) { }) } } + +func TestNew(t *testing.T) { + type args struct { + region scw.Region + projectID string + secretData map[string][]byte + } + tests := []struct { + name string + args args + asserts func(g *WithT, c *Client) + wantErr bool + }{ + { + name: "empty secret", + args: args{ + region: scw.RegionFrPar, + projectID: projectID, + secretData: map[string][]byte{}, + }, + wantErr: true, + }, + { + name: "invalid access key format", + args: args{ + region: scw.RegionFrPar, + projectID: projectID, + secretData: map[string][]byte{ + scw.ScwAccessKeyEnv: []byte("a"), + scw.ScwSecretKeyEnv: []byte(secretKey), + }, + }, + wantErr: true, + }, + { + name: "new client", + args: args{ + region: scw.RegionFrPar, + projectID: projectID, + secretData: map[string][]byte{ + scw.ScwAccessKeyEnv: []byte("SCWXXXXXXXXXXXXXXXXX"), + scw.ScwSecretKeyEnv: []byte(secretKey), + scw.ScwAPIURLEnv: []byte("https://api.scaleway.com"), + }, + }, + asserts: func(g *WithT, c *Client) { + g.Expect(c).ToNot(BeNil()) + g.Expect(c.region).To(Equal(scw.RegionFrPar)) + g.Expect(c.projectID).To(Equal(projectID)) + g.Expect(c.secretKey).To(Equal(secretKey)) + g.Expect(c.vpc).ToNot(BeNil()) + g.Expect(c.vpcgw).ToNot(BeNil()) + g.Expect(c.lb).ToNot(BeNil()) + g.Expect(c.domain).ToNot(BeNil()) + g.Expect(c.instance).ToNot(BeNil()) + g.Expect(c.block).ToNot(BeNil()) + g.Expect(c.marketplace).ToNot(BeNil()) + g.Expect(c.ipam).ToNot(BeNil()) + g.Expect(c.k8s).ToNot(BeNil()) + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + got, err := New(tt.args.region, tt.args.projectID, tt.args.secretData) + if (err != nil) != tt.wantErr { + t.Errorf("New() error = %v, wantErr %v", err, tt.wantErr) + return + } + + if err == nil { + tt.asserts(g, got) + } + }) + } +} + +func TestTagsWithoutCreatedBy(t *testing.T) { + t.Parallel() + type args struct { + tags []string + } + tests := []struct { + name string + args args + want []string + }{ + { + name: "remove created-by tag", + args: args{ + tags: []string{"a", "b", "c", createdByTag}, + }, + want: []string{"a", "b", "c"}, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + if got := TagsWithoutCreatedBy(tt.args.tags); !reflect.DeepEqual(got, tt.want) { + t.Errorf("TagsWithoutCreatedBy() = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/internal/service/scaleway/client/config.go b/internal/service/scaleway/client/config.go new file mode 100644 index 0000000..07ae487 --- /dev/null +++ b/internal/service/scaleway/client/config.go @@ -0,0 +1,9 @@ +package client + +type Config interface { + GetSecretKey() string +} + +func (c *Client) GetSecretKey() string { + return c.secretKey +} diff --git a/internal/service/scaleway/client/interface.go b/internal/service/scaleway/client/interface.go index 3bc9a94..e407ae4 100644 --- a/internal/service/scaleway/client/interface.go +++ b/internal/service/scaleway/client/interface.go @@ -4,9 +4,11 @@ package client // a specific region and project. 
type Interface interface { Block + Config Domain Instance IPAM + K8s LB Marketplace VPC diff --git a/internal/service/scaleway/client/k8s.go b/internal/service/scaleway/client/k8s.go new file mode 100644 index 0000000..bee3ce3 --- /dev/null +++ b/internal/service/scaleway/client/k8s.go @@ -0,0 +1,378 @@ +package client + +import ( + "context" + "fmt" + "slices" + + "github.com/scaleway/scaleway-sdk-go/api/k8s/v1" + "github.com/scaleway/scaleway-sdk-go/scw" +) + +type K8sAPI interface { + ListClusters(req *k8s.ListClustersRequest, opts ...scw.RequestOption) (*k8s.ListClustersResponse, error) + CreateCluster(req *k8s.CreateClusterRequest, opts ...scw.RequestOption) (*k8s.Cluster, error) + DeleteCluster(req *k8s.DeleteClusterRequest, opts ...scw.RequestOption) (*k8s.Cluster, error) + GetClusterKubeConfig(req *k8s.GetClusterKubeConfigRequest, opts ...scw.RequestOption) (*k8s.Kubeconfig, error) + UpdateCluster(req *k8s.UpdateClusterRequest, opts ...scw.RequestOption) (*k8s.Cluster, error) + UpgradeCluster(req *k8s.UpgradeClusterRequest, opts ...scw.RequestOption) (*k8s.Cluster, error) + SetClusterType(req *k8s.SetClusterTypeRequest, opts ...scw.RequestOption) (*k8s.Cluster, error) + ListPools(req *k8s.ListPoolsRequest, opts ...scw.RequestOption) (*k8s.ListPoolsResponse, error) + CreatePool(req *k8s.CreatePoolRequest, opts ...scw.RequestOption) (*k8s.Pool, error) + UpdatePool(req *k8s.UpdatePoolRequest, opts ...scw.RequestOption) (*k8s.Pool, error) + UpgradePool(req *k8s.UpgradePoolRequest, opts ...scw.RequestOption) (*k8s.Pool, error) + DeletePool(req *k8s.DeletePoolRequest, opts ...scw.RequestOption) (*k8s.Pool, error) + ListNodes(req *k8s.ListNodesRequest, opts ...scw.RequestOption) (*k8s.ListNodesResponse, error) + ListClusterACLRules(req *k8s.ListClusterACLRulesRequest, opts ...scw.RequestOption) (*k8s.ListClusterACLRulesResponse, error) + SetClusterACLRules(req *k8s.SetClusterACLRulesRequest, opts ...scw.RequestOption) (*k8s.SetClusterACLRulesResponse, error) +} + +type K8s interface { + FindCluster(ctx context.Context, name string) (*k8s.Cluster, error) + CreateCluster( + ctx context.Context, + name, clusterType, version string, + pnID *string, + tags, featureGates, admissionPlugins, apiServerCertSANs []string, + cni k8s.CNI, + autoscalerConfig *k8s.CreateClusterRequestAutoscalerConfig, + autoUpgrade *k8s.CreateClusterRequestAutoUpgrade, + openIDConnectConfig *k8s.CreateClusterRequestOpenIDConnectConfig, + podCIDR, serviceCIDR scw.IPNet, + ) (*k8s.Cluster, error) + DeleteCluster(ctx context.Context, id string, withAdditionalResources bool) error + GetClusterKubeConfig(ctx context.Context, id string) (*k8s.Kubeconfig, error) + UpdateCluster( + ctx context.Context, + id string, + tags, featureGates, admissionPlugins, apiServerCertSANs *[]string, + autoscalerConfig *k8s.UpdateClusterRequestAutoscalerConfig, + autoUpgrade *k8s.UpdateClusterRequestAutoUpgrade, + openIDConnectConfig *k8s.UpdateClusterRequestOpenIDConnectConfig, + ) error + UpgradeCluster(ctx context.Context, id, version string) error + SetClusterType(ctx context.Context, id, clusterType string) error + FindPool(ctx context.Context, clusterID, name string) (*k8s.Pool, error) + CreatePool( + ctx context.Context, + zone scw.Zone, + clusterID, name, nodeType string, + placementGroupID, securityGroupID *string, + autoscaling, autohealing, publicIPDisabled bool, + size uint32, + minSize, maxSize *uint32, + tags []string, + kubeletArgs map[string]string, + rootVolumeType k8s.PoolVolumeType, + rootVolumeSizeGB *uint64, + upgradePolicy 
*k8s.CreatePoolRequestUpgradePolicy, + ) (*k8s.Pool, error) + UpdatePool( + ctx context.Context, + id string, + autoscaling, autohealing *bool, + size, minSize, maxSize *uint32, + tags *[]string, + kubeletArgs *map[string]string, + upgradePolicy *k8s.UpdatePoolRequestUpgradePolicy, + ) error + UpgradePool(ctx context.Context, id, version string) error + DeletePool(ctx context.Context, id string) error + ListNodes(ctx context.Context, clusterID, poolID string) ([]*k8s.Node, error) + ListClusterACLRules(ctx context.Context, clusterID string) ([]*k8s.ACLRule, error) + SetClusterACLRules(ctx context.Context, clusterID string, rules []*k8s.ACLRuleRequest) error +} + +func (c *Client) FindCluster(ctx context.Context, name string) (*k8s.Cluster, error) { + resp, err := c.k8s.ListClusters(&k8s.ListClustersRequest{ + ProjectID: &c.projectID, + Name: &name, + }, scw.WithContext(ctx), scw.WithAllPages()) + if err != nil { + return nil, newCallError("ListClusters", err) + } + + // Filter out all clusters that have the wrong name. + clusters := slices.DeleteFunc(resp.Clusters, func(cluster *k8s.Cluster) bool { + return cluster.Name != name + }) + + switch len(clusters) { + case 0: + return nil, ErrNoItemFound + case 1: + return clusters[0], nil + default: + // This case should never happen as k8s API prevents the creation of + // multiple clusters with the same name. + return nil, fmt.Errorf("%w: found %d clusters with name %s", ErrTooManyItemsFound, len(clusters), name) + } +} + +func (c *Client) CreateCluster( + ctx context.Context, + name, clusterType, version string, + pnID *string, + tags, featureGates, admissionPlugins, apiServerCertSANs []string, + cni k8s.CNI, + autoscalerConfig *k8s.CreateClusterRequestAutoscalerConfig, + autoUpgrade *k8s.CreateClusterRequestAutoUpgrade, + openIDConnectConfig *k8s.CreateClusterRequestOpenIDConnectConfig, + podCIDR, serviceCIDR scw.IPNet, +) (*k8s.Cluster, error) { + cluster, err := c.k8s.CreateCluster(&k8s.CreateClusterRequest{ + Name: name, + Type: clusterType, + Description: createdByDescription, + Tags: append(tags, createdByTag), + Version: version, + Cni: cni, + PrivateNetworkID: pnID, + AutoscalerConfig: autoscalerConfig, + AutoUpgrade: autoUpgrade, + FeatureGates: featureGates, + AdmissionPlugins: admissionPlugins, + OpenIDConnectConfig: openIDConnectConfig, + ApiserverCertSans: apiServerCertSANs, + PodCidr: &podCIDR, + ServiceCidr: &serviceCIDR, + }, scw.WithContext(ctx)) + if err != nil { + return nil, newCallError("CreateCluster", err) + } + + return cluster, nil +} + +func (c *Client) DeleteCluster(ctx context.Context, id string, withAdditionalResources bool) error { + if _, err := c.k8s.DeleteCluster(&k8s.DeleteClusterRequest{ + ClusterID: id, + WithAdditionalResources: withAdditionalResources, + }, scw.WithContext(ctx)); err != nil { + return newCallError("DeleteCluster", err) + } + + return nil +} + +func (c *Client) GetClusterKubeConfig(ctx context.Context, id string) (*k8s.Kubeconfig, error) { + kubeconfig, err := c.k8s.GetClusterKubeConfig(&k8s.GetClusterKubeConfigRequest{ + ClusterID: id, + }, scw.WithContext(ctx)) + if err != nil { + return nil, newCallError("GetClusterKubeConfig", err) + } + + return kubeconfig, nil +} + +func (c *Client) UpdateCluster( + ctx context.Context, + id string, + tags, featureGates, admissionPlugins, apiServerCertSANs *[]string, + autoscalerConfig *k8s.UpdateClusterRequestAutoscalerConfig, + autoUpgrade *k8s.UpdateClusterRequestAutoUpgrade, + openIDConnectConfig *k8s.UpdateClusterRequestOpenIDConnectConfig, +) 
error { + if tags != nil { + *tags = append(*tags, createdByTag) + } + + if _, err := c.k8s.UpdateCluster(&k8s.UpdateClusterRequest{ + ClusterID: id, + Tags: tags, + AutoscalerConfig: autoscalerConfig, + AutoUpgrade: autoUpgrade, + FeatureGates: featureGates, + AdmissionPlugins: admissionPlugins, + OpenIDConnectConfig: openIDConnectConfig, + ApiserverCertSans: apiServerCertSANs, + }, scw.WithContext(ctx)); err != nil { + return newCallError("UpdateCluster", err) + } + + return nil +} + +func (c *Client) UpgradeCluster(ctx context.Context, id, version string) error { + if _, err := c.k8s.UpgradeCluster(&k8s.UpgradeClusterRequest{ + ClusterID: id, + Version: version, + UpgradePools: false, + }, scw.WithContext(ctx)); err != nil { + return newCallError("UpgradeCluster", err) + } + + return nil +} + +func (c *Client) SetClusterType(ctx context.Context, id, clusterType string) error { + if _, err := c.k8s.SetClusterType(&k8s.SetClusterTypeRequest{ + ClusterID: id, + Type: clusterType, + }, scw.WithContext(ctx)); err != nil { + return newCallError("SetClusterType", err) + } + + return nil +} + +func (c *Client) FindPool(ctx context.Context, clusterID, name string) (*k8s.Pool, error) { + resp, err := c.k8s.ListPools(&k8s.ListPoolsRequest{ + ClusterID: clusterID, + Name: scw.StringPtr(name), + }, scw.WithContext(ctx), scw.WithAllPages()) + if err != nil { + return nil, newCallError("ListPools", err) + } + + // Filter out all pools that have the wrong name. + pools := slices.DeleteFunc(resp.Pools, func(pool *k8s.Pool) bool { + return pool.Name != name + }) + + switch len(pools) { + case 0: + return nil, ErrNoItemFound + case 1: + return pools[0], nil + default: + // This case should never happen as k8s API prevents the creation of + // multiple pools with the same name. 
+ return nil, fmt.Errorf("%w: found %d pools with name %s", ErrTooManyItemsFound, len(pools), name) + } +} + +func (c *Client) CreatePool( + ctx context.Context, + zone scw.Zone, + clusterID, name, nodeType string, + placementGroupID, securityGroupID *string, + autoscaling, autohealing, publicIPDisabled bool, + size uint32, + minSize, maxSize *uint32, + tags []string, + kubeletArgs map[string]string, + rootVolumeType k8s.PoolVolumeType, + rootVolumeSizeGB *uint64, + upgradePolicy *k8s.CreatePoolRequestUpgradePolicy, +) (*k8s.Pool, error) { + var rootVolumeSize *scw.Size + if rootVolumeSizeGB != nil { + rootVolumeSize = scw.SizePtr(scw.Size(*rootVolumeSizeGB) * scw.GB) + } + + pool, err := c.k8s.CreatePool(&k8s.CreatePoolRequest{ + Zone: zone, + ClusterID: clusterID, + Name: name, + NodeType: nodeType, + PlacementGroupID: placementGroupID, + Autoscaling: autoscaling, + Autohealing: autohealing, + PublicIPDisabled: publicIPDisabled, + Size: size, + MinSize: minSize, + MaxSize: maxSize, + Tags: append(tags, createdByTag), + KubeletArgs: kubeletArgs, + RootVolumeType: rootVolumeType, + RootVolumeSize: rootVolumeSize, + SecurityGroupID: securityGroupID, + UpgradePolicy: upgradePolicy, + }, scw.WithContext(ctx)) + if err != nil { + return nil, newCallError("CreatePool", err) + } + + return pool, nil +} + +func (c *Client) UpdatePool( + ctx context.Context, + id string, + autoscaling, autohealing *bool, + size, minSize, maxSize *uint32, + tags *[]string, + kubeletArgs *map[string]string, + upgradePolicy *k8s.UpdatePoolRequestUpgradePolicy, +) error { + if tags != nil { + *tags = append(*tags, createdByTag) + } + + if _, err := c.k8s.UpdatePool(&k8s.UpdatePoolRequest{ + PoolID: id, + Autoscaling: autoscaling, + Size: size, + MinSize: minSize, + MaxSize: maxSize, + Autohealing: autohealing, + Tags: tags, + KubeletArgs: kubeletArgs, + UpgradePolicy: upgradePolicy, + }, scw.WithContext(ctx)); err != nil { + return newCallError("UpdatePool", err) + } + + return nil +} + +func (c *Client) UpgradePool(ctx context.Context, id, version string) error { + if _, err := c.k8s.UpgradePool(&k8s.UpgradePoolRequest{ + PoolID: id, + Version: version, + }, scw.WithContext(ctx)); err != nil { + return newCallError("UpgradePool", err) + } + + return nil +} + +func (c *Client) DeletePool(ctx context.Context, id string) error { + if _, err := c.k8s.DeletePool(&k8s.DeletePoolRequest{ + PoolID: id, + }, scw.WithContext(ctx)); err != nil { + return newCallError("DeletePool", err) + } + + return nil +} + +func (c *Client) ListNodes(ctx context.Context, clusterID, poolID string) ([]*k8s.Node, error) { + resp, err := c.k8s.ListNodes(&k8s.ListNodesRequest{ + ClusterID: clusterID, + PoolID: &poolID, + }, scw.WithContext(ctx), scw.WithAllPages()) + if err != nil { + return nil, newCallError("ListNodes", err) + } + + return resp.Nodes, nil +} + +func (c *Client) ListClusterACLRules(ctx context.Context, clusterID string) ([]*k8s.ACLRule, error) { + resp, err := c.k8s.ListClusterACLRules(&k8s.ListClusterACLRulesRequest{ + ClusterID: clusterID, + }, scw.WithContext(ctx), scw.WithAllPages()) + if err != nil { + return nil, newCallError("ListClusterACLRules", err) + } + + return resp.Rules, nil +} + +func (c *Client) SetClusterACLRules(ctx context.Context, clusterID string, rules []*k8s.ACLRuleRequest) error { + for _, rule := range rules { + rule.Description = createdByDescription + } + + if _, err := c.k8s.SetClusterACLRules(&k8s.SetClusterACLRulesRequest{ + ClusterID: clusterID, + ACLs: rules, + }, scw.WithContext(ctx)); err != 
nil { + return newCallError("SetClusterACLRules", err) + } + + return nil +} diff --git a/internal/service/scaleway/client/k8s_test.go b/internal/service/scaleway/client/k8s_test.go new file mode 100644 index 0000000..04f33c7 --- /dev/null +++ b/internal/service/scaleway/client/k8s_test.go @@ -0,0 +1,1167 @@ +package client + +import ( + "context" + "net" + "reflect" + "testing" + + "github.com/scaleway/cluster-api-provider-scaleway/internal/service/scaleway/client/mock_client" + "github.com/scaleway/scaleway-sdk-go/api/k8s/v1" + "github.com/scaleway/scaleway-sdk-go/scw" + "go.uber.org/mock/gomock" +) + +const ( + clusterID = "11111111-1111-1111-1111-111111111111" + poolID = "11111111-1111-1111-1111-111111111111" + aclID1 = "11111111-1111-1111-1111-111111111111" + aclID2 = "22222222-1111-1111-1111-111111111111" +) + +func TestClient_FindCluster(t *testing.T) { + t.Parallel() + type fields struct { + projectID string + } + type args struct { + ctx context.Context + name string + } + tests := []struct { + name string + fields fields + args args + want *k8s.Cluster + wantErr bool + expect func(d *mock_client.MockK8sAPIMockRecorder) + }{ + { + name: "cluster found", + fields: fields{ + projectID: projectID, + }, + args: args{ + ctx: context.TODO(), + name: "mycluster", + }, + want: &k8s.Cluster{ + ID: clusterID, + Name: "mycluster", + }, + expect: func(d *mock_client.MockK8sAPIMockRecorder) { + d.ListClusters(&k8s.ListClustersRequest{ + ProjectID: scw.StringPtr(projectID), + Name: scw.StringPtr("mycluster"), + }, gomock.Any()).Return(&k8s.ListClustersResponse{ + TotalCount: 1, + Clusters: []*k8s.Cluster{ + { + ID: clusterID, + Name: "mycluster", + }, + }, + }, nil) + }, + }, + { + name: "no cluster found", + fields: fields{ + projectID: projectID, + }, + args: args{ + ctx: context.TODO(), + name: "mycluster", + }, + wantErr: true, + expect: func(d *mock_client.MockK8sAPIMockRecorder) { + d.ListClusters(&k8s.ListClustersRequest{ + ProjectID: scw.StringPtr(projectID), + Name: scw.StringPtr("mycluster"), + }, gomock.Any(), gomock.Any()).Return(&k8s.ListClustersResponse{}, nil) + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + mockCtrl := gomock.NewController(t) + defer mockCtrl.Finish() + + k8sMock := mock_client.NewMockK8sAPI(mockCtrl) + + tt.expect(k8sMock.EXPECT()) + + c := &Client{ + projectID: tt.fields.projectID, + k8s: k8sMock, + } + got, err := c.FindCluster(tt.args.ctx, tt.args.name) + if (err != nil) != tt.wantErr { + t.Errorf("Client.FindCluster() error = %v, wantErr %v", err, tt.wantErr) + return + } + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("Client.FindCluster() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestClient_CreateCluster(t *testing.T) { + t.Parallel() + type args struct { + ctx context.Context + name string + clusterType string + version string + pnID *string + tags []string + featureGates []string + admissionPlugins []string + apiServerCertSANs []string + cni k8s.CNI + autoscalerConfig *k8s.CreateClusterRequestAutoscalerConfig + autoUpgrade *k8s.CreateClusterRequestAutoUpgrade + openIDConnectConfig *k8s.CreateClusterRequestOpenIDConnectConfig + podCIDR scw.IPNet + serviceCIDR scw.IPNet + } + tests := []struct { + name string + args args + want *k8s.Cluster + wantErr bool + expect func(d *mock_client.MockK8sAPIMockRecorder) + }{ + { + name: "create cluster", + args: args{ + ctx: context.TODO(), + name: "test", + clusterType: "kapsule", + version: "1.30.4", + pnID: scw.StringPtr(privateNetworkID), + tags: 
[]string{"tag1", "tag2"}, + featureGates: []string{"HPAScaleToZero"}, + admissionPlugins: []string{"AlwaysPullImages"}, + apiServerCertSANs: []string{"my-cluster.test"}, + cni: k8s.CNICilium, + autoscalerConfig: &k8s.CreateClusterRequestAutoscalerConfig{ + ScaleDownDisabled: scw.BoolPtr(false), + ScaleDownDelayAfterAdd: scw.StringPtr("1m"), + Estimator: k8s.AutoscalerEstimatorBinpacking, + Expander: k8s.AutoscalerExpanderMostPods, + IgnoreDaemonsetsUtilization: scw.BoolPtr(true), + BalanceSimilarNodeGroups: scw.BoolPtr(true), + ExpendablePodsPriorityCutoff: scw.Int32Ptr(1), + ScaleDownUnneededTime: scw.StringPtr("1m"), + ScaleDownUtilizationThreshold: scw.Float32Ptr(1), + MaxGracefulTerminationSec: scw.Uint32Ptr(30), + }, + autoUpgrade: &k8s.CreateClusterRequestAutoUpgrade{ + Enable: true, + MaintenanceWindow: &k8s.MaintenanceWindow{ + StartHour: 1, + Day: k8s.MaintenanceWindowDayOfTheWeekFriday, + }, + }, + openIDConnectConfig: &k8s.CreateClusterRequestOpenIDConnectConfig{ + IssuerURL: "http://oidcprovider.test", + ClientID: "abcd", + UsernameClaim: scw.StringPtr("username"), + UsernamePrefix: scw.StringPtr("usernameprefix"), + GroupsClaim: &[]string{"groups"}, + GroupsPrefix: scw.StringPtr("groupsprefix"), + RequiredClaim: &[]string{"verified"}, + }, + podCIDR: scw.IPNet{IPNet: net.IPNet{IP: net.IPv4(192, 168, 0, 0), Mask: net.IPv4Mask(255, 255, 0, 0)}}, + serviceCIDR: scw.IPNet{IPNet: net.IPNet{IP: net.IPv4(10, 0, 0, 0), Mask: net.IPv4Mask(255, 0, 0, 0)}}, + }, + want: &k8s.Cluster{ + ID: clusterID, + Name: "test", + }, + expect: func(d *mock_client.MockK8sAPIMockRecorder) { + d.CreateCluster(&k8s.CreateClusterRequest{ + Type: "kapsule", + Name: "test", + Description: createdByDescription, + Tags: []string{"tag1", "tag2", createdByTag}, + Version: "1.30.4", + Cni: k8s.CNICilium, + AutoscalerConfig: &k8s.CreateClusterRequestAutoscalerConfig{ + ScaleDownDisabled: scw.BoolPtr(false), + ScaleDownDelayAfterAdd: scw.StringPtr("1m"), + Estimator: k8s.AutoscalerEstimatorBinpacking, + Expander: k8s.AutoscalerExpanderMostPods, + IgnoreDaemonsetsUtilization: scw.BoolPtr(true), + BalanceSimilarNodeGroups: scw.BoolPtr(true), + ExpendablePodsPriorityCutoff: scw.Int32Ptr(1), + ScaleDownUnneededTime: scw.StringPtr("1m"), + ScaleDownUtilizationThreshold: scw.Float32Ptr(1), + MaxGracefulTerminationSec: scw.Uint32Ptr(30), + }, + AutoUpgrade: &k8s.CreateClusterRequestAutoUpgrade{ + Enable: true, + MaintenanceWindow: &k8s.MaintenanceWindow{ + StartHour: 1, + Day: k8s.MaintenanceWindowDayOfTheWeekFriday, + }, + }, + FeatureGates: []string{"HPAScaleToZero"}, + AdmissionPlugins: []string{"AlwaysPullImages"}, + OpenIDConnectConfig: &k8s.CreateClusterRequestOpenIDConnectConfig{ + IssuerURL: "http://oidcprovider.test", + ClientID: "abcd", + UsernameClaim: scw.StringPtr("username"), + UsernamePrefix: scw.StringPtr("usernameprefix"), + GroupsClaim: &[]string{"groups"}, + GroupsPrefix: scw.StringPtr("groupsprefix"), + RequiredClaim: &[]string{"verified"}, + }, + ApiserverCertSans: []string{"my-cluster.test"}, + PrivateNetworkID: scw.StringPtr(privateNetworkID), + PodCidr: &scw.IPNet{IPNet: net.IPNet{IP: net.IPv4(192, 168, 0, 0), Mask: net.IPv4Mask(255, 255, 0, 0)}}, + ServiceCidr: &scw.IPNet{IPNet: net.IPNet{IP: net.IPv4(10, 0, 0, 0), Mask: net.IPv4Mask(255, 0, 0, 0)}}, + }, gomock.Any()).Return(&k8s.Cluster{ + ID: clusterID, + Name: "test", + }, nil) + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + mockCtrl := gomock.NewController(t) + defer mockCtrl.Finish() + + 
k8sMock := mock_client.NewMockK8sAPI(mockCtrl) + + tt.expect(k8sMock.EXPECT()) + + c := &Client{ + k8s: k8sMock, + } + got, err := c.CreateCluster(tt.args.ctx, tt.args.name, tt.args.clusterType, tt.args.version, tt.args.pnID, tt.args.tags, tt.args.featureGates, tt.args.admissionPlugins, tt.args.apiServerCertSANs, tt.args.cni, tt.args.autoscalerConfig, tt.args.autoUpgrade, tt.args.openIDConnectConfig, tt.args.podCIDR, tt.args.serviceCIDR) + if (err != nil) != tt.wantErr { + t.Errorf("Client.CreateCluster() error = %v, wantErr %v", err, tt.wantErr) + return + } + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("Client.CreateCluster() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestClient_DeleteCluster(t *testing.T) { + t.Parallel() + type args struct { + ctx context.Context + id string + withAdditionalResources bool + } + tests := []struct { + name string + args args + wantErr bool + expect func(d *mock_client.MockK8sAPIMockRecorder) + }{ + { + name: "delete cluster", + args: args{ + ctx: context.TODO(), + id: clusterID, + withAdditionalResources: true, + }, + expect: func(d *mock_client.MockK8sAPIMockRecorder) { + d.DeleteCluster(&k8s.DeleteClusterRequest{ + ClusterID: clusterID, + WithAdditionalResources: true, + }, gomock.Any()).Return(&k8s.Cluster{}, nil) + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + mockCtrl := gomock.NewController(t) + defer mockCtrl.Finish() + + k8sMock := mock_client.NewMockK8sAPI(mockCtrl) + + tt.expect(k8sMock.EXPECT()) + + c := &Client{ + k8s: k8sMock, + } + if err := c.DeleteCluster(tt.args.ctx, tt.args.id, tt.args.withAdditionalResources); (err != nil) != tt.wantErr { + t.Errorf("Client.DeleteCluster() error = %v, wantErr %v", err, tt.wantErr) + } + }) + } +} + +func TestClient_GetClusterKubeConfig(t *testing.T) { + t.Parallel() + type args struct { + ctx context.Context + id string + } + tests := []struct { + name string + args args + want *k8s.Kubeconfig + wantErr bool + expect func(d *mock_client.MockK8sAPIMockRecorder) + }{ + { + name: "get cluster kubeconfig", + args: args{ + ctx: context.TODO(), + id: clusterID, + }, + want: &k8s.Kubeconfig{Kind: "Config"}, + expect: func(d *mock_client.MockK8sAPIMockRecorder) { + d.GetClusterKubeConfig(&k8s.GetClusterKubeConfigRequest{ + ClusterID: clusterID, + }, gomock.Any()).Return(&k8s.Kubeconfig{ + Kind: "Config", + }, nil) + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + mockCtrl := gomock.NewController(t) + defer mockCtrl.Finish() + + k8sMock := mock_client.NewMockK8sAPI(mockCtrl) + + tt.expect(k8sMock.EXPECT()) + + c := &Client{ + k8s: k8sMock, + } + got, err := c.GetClusterKubeConfig(tt.args.ctx, tt.args.id) + if (err != nil) != tt.wantErr { + t.Errorf("Client.GetClusterKubeConfig() error = %v, wantErr %v", err, tt.wantErr) + return + } + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("Client.GetClusterKubeConfig() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestClient_UpdateCluster(t *testing.T) { + t.Parallel() + type args struct { + ctx context.Context + id string + tags *[]string + featureGates *[]string + admissionPlugins *[]string + apiServerCertSANs *[]string + autoscalerConfig *k8s.UpdateClusterRequestAutoscalerConfig + autoUpgrade *k8s.UpdateClusterRequestAutoUpgrade + openIDConnectConfig *k8s.UpdateClusterRequestOpenIDConnectConfig + } + tests := []struct { + name string + args args + wantErr bool + expect func(d *mock_client.MockK8sAPIMockRecorder) + }{ + { + name: "update 
cluster", + args: args{ + ctx: context.TODO(), + id: clusterID, + tags: &[]string{"tag1", "tag2"}, + featureGates: &[]string{"HPAScaleToZero"}, + admissionPlugins: &[]string{"AlwaysPullImages"}, + apiServerCertSANs: &[]string{"mycluster.test"}, + autoscalerConfig: &k8s.UpdateClusterRequestAutoscalerConfig{ + ScaleDownDisabled: scw.BoolPtr(false), + ScaleDownDelayAfterAdd: scw.StringPtr("1m"), + Estimator: k8s.AutoscalerEstimatorBinpacking, + Expander: k8s.AutoscalerExpanderMostPods, + IgnoreDaemonsetsUtilization: scw.BoolPtr(true), + BalanceSimilarNodeGroups: scw.BoolPtr(true), + ExpendablePodsPriorityCutoff: scw.Int32Ptr(1), + ScaleDownUnneededTime: scw.StringPtr("1m"), + ScaleDownUtilizationThreshold: scw.Float32Ptr(1), + MaxGracefulTerminationSec: scw.Uint32Ptr(30), + }, + autoUpgrade: &k8s.UpdateClusterRequestAutoUpgrade{ + Enable: scw.BoolPtr(true), + MaintenanceWindow: &k8s.MaintenanceWindow{ + StartHour: 1, + Day: k8s.MaintenanceWindowDayOfTheWeekFriday, + }, + }, + openIDConnectConfig: &k8s.UpdateClusterRequestOpenIDConnectConfig{ + IssuerURL: scw.StringPtr("http://oidcprovider.test"), + ClientID: scw.StringPtr("abcd"), + UsernameClaim: scw.StringPtr("username"), + UsernamePrefix: scw.StringPtr("usernameprefix"), + GroupsClaim: &[]string{"groups"}, + GroupsPrefix: scw.StringPtr("groupsprefix"), + RequiredClaim: &[]string{"verified"}, + }, + }, + expect: func(d *mock_client.MockK8sAPIMockRecorder) { + d.UpdateCluster(&k8s.UpdateClusterRequest{ + ClusterID: clusterID, + Tags: &[]string{"tag1", "tag2", createdByTag}, + AutoscalerConfig: &k8s.UpdateClusterRequestAutoscalerConfig{ + ScaleDownDisabled: scw.BoolPtr(false), + ScaleDownDelayAfterAdd: scw.StringPtr("1m"), + Estimator: k8s.AutoscalerEstimatorBinpacking, + Expander: k8s.AutoscalerExpanderMostPods, + IgnoreDaemonsetsUtilization: scw.BoolPtr(true), + BalanceSimilarNodeGroups: scw.BoolPtr(true), + ExpendablePodsPriorityCutoff: scw.Int32Ptr(1), + ScaleDownUnneededTime: scw.StringPtr("1m"), + ScaleDownUtilizationThreshold: scw.Float32Ptr(1), + MaxGracefulTerminationSec: scw.Uint32Ptr(30), + }, + AutoUpgrade: &k8s.UpdateClusterRequestAutoUpgrade{ + Enable: scw.BoolPtr(true), + MaintenanceWindow: &k8s.MaintenanceWindow{ + StartHour: 1, + Day: k8s.MaintenanceWindowDayOfTheWeekFriday, + }, + }, + FeatureGates: &[]string{"HPAScaleToZero"}, + AdmissionPlugins: &[]string{"AlwaysPullImages"}, + ApiserverCertSans: &[]string{"mycluster.test"}, + OpenIDConnectConfig: &k8s.UpdateClusterRequestOpenIDConnectConfig{ + IssuerURL: scw.StringPtr("http://oidcprovider.test"), + ClientID: scw.StringPtr("abcd"), + UsernameClaim: scw.StringPtr("username"), + UsernamePrefix: scw.StringPtr("usernameprefix"), + GroupsClaim: &[]string{"groups"}, + GroupsPrefix: scw.StringPtr("groupsprefix"), + RequiredClaim: &[]string{"verified"}, + }, + }, gomock.Any()).Return(&k8s.Cluster{ + ID: clusterID, + }, nil) + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + mockCtrl := gomock.NewController(t) + defer mockCtrl.Finish() + + k8sMock := mock_client.NewMockK8sAPI(mockCtrl) + + tt.expect(k8sMock.EXPECT()) + + c := &Client{ + k8s: k8sMock, + } + if err := c.UpdateCluster(tt.args.ctx, tt.args.id, tt.args.tags, tt.args.featureGates, tt.args.admissionPlugins, tt.args.apiServerCertSANs, tt.args.autoscalerConfig, tt.args.autoUpgrade, tt.args.openIDConnectConfig); (err != nil) != tt.wantErr { + t.Errorf("Client.UpdateCluster() error = %v, wantErr %v", err, tt.wantErr) + } + }) + } +} + +func TestClient_UpgradeCluster(t 
*testing.T) { + t.Parallel() + type args struct { + ctx context.Context + id string + version string + } + tests := []struct { + name string + args args + wantErr bool + expect func(d *mock_client.MockK8sAPIMockRecorder) + }{ + { + name: "upgrade cluster", + args: args{ + ctx: context.TODO(), + id: clusterID, + version: "1.31.5", + }, + expect: func(d *mock_client.MockK8sAPIMockRecorder) { + d.UpgradeCluster(&k8s.UpgradeClusterRequest{ + ClusterID: clusterID, + Version: "1.31.5", + UpgradePools: false, + }, gomock.Any()).Return(&k8s.Cluster{}, nil) + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + mockCtrl := gomock.NewController(t) + defer mockCtrl.Finish() + + k8sMock := mock_client.NewMockK8sAPI(mockCtrl) + + tt.expect(k8sMock.EXPECT()) + + c := &Client{ + k8s: k8sMock, + } + if err := c.UpgradeCluster(tt.args.ctx, tt.args.id, tt.args.version); (err != nil) != tt.wantErr { + t.Errorf("Client.UpgradeCluster() error = %v, wantErr %v", err, tt.wantErr) + } + }) + } +} + +func TestClient_SetClusterType(t *testing.T) { + t.Parallel() + type args struct { + ctx context.Context + id string + clusterType string + } + tests := []struct { + name string + args args + wantErr bool + expect func(d *mock_client.MockK8sAPIMockRecorder) + }{ + { + name: "set cluster type", + args: args{ + ctx: context.TODO(), + id: clusterID, + clusterType: "kapsule-dedicated-4", + }, + expect: func(d *mock_client.MockK8sAPIMockRecorder) { + d.SetClusterType(&k8s.SetClusterTypeRequest{ + ClusterID: clusterID, + Type: "kapsule-dedicated-4", + }, gomock.Any()) + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + mockCtrl := gomock.NewController(t) + defer mockCtrl.Finish() + + k8sMock := mock_client.NewMockK8sAPI(mockCtrl) + + tt.expect(k8sMock.EXPECT()) + + c := &Client{ + k8s: k8sMock, + } + if err := c.SetClusterType(tt.args.ctx, tt.args.id, tt.args.clusterType); (err != nil) != tt.wantErr { + t.Errorf("Client.SetClusterType() error = %v, wantErr %v", err, tt.wantErr) + } + }) + } +} + +func TestClient_FindPool(t *testing.T) { + t.Parallel() + type args struct { + ctx context.Context + clusterID string + name string + } + tests := []struct { + name string + args args + want *k8s.Pool + wantErr bool + expect func(d *mock_client.MockK8sAPIMockRecorder) + }{ + { + name: "found pool", + args: args{ + ctx: context.TODO(), + clusterID: clusterID, + name: "mypool", + }, + want: &k8s.Pool{ + ID: poolID, + Name: "mypool", + }, + expect: func(d *mock_client.MockK8sAPIMockRecorder) { + d.ListPools(&k8s.ListPoolsRequest{ + ClusterID: clusterID, + Name: scw.StringPtr("mypool"), + }, gomock.Any(), gomock.Any()).Return(&k8s.ListPoolsResponse{ + TotalCount: 1, + Pools: []*k8s.Pool{ + { + ID: poolID, + Name: "mypool", + }, + }, + }, nil) + }, + }, + { + name: "no pool found", + args: args{ + ctx: context.TODO(), + clusterID: clusterID, + name: "mypool", + }, + wantErr: true, + expect: func(d *mock_client.MockK8sAPIMockRecorder) { + d.ListPools(&k8s.ListPoolsRequest{ + ClusterID: clusterID, + Name: scw.StringPtr("mypool"), + }, gomock.Any(), gomock.Any()).Return(&k8s.ListPoolsResponse{}, nil) + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + mockCtrl := gomock.NewController(t) + defer mockCtrl.Finish() + + k8sMock := mock_client.NewMockK8sAPI(mockCtrl) + + tt.expect(k8sMock.EXPECT()) + + c := &Client{ + k8s: k8sMock, + } + got, err := c.FindPool(tt.args.ctx, tt.args.clusterID, tt.args.name) 
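+ // FindPool is expected to report an error when ListPools returns no matching pool, which the "no pool found" case asserts via wantErr.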
+ if (err != nil) != tt.wantErr { + t.Errorf("Client.FindPool() error = %v, wantErr %v", err, tt.wantErr) + return + } + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("Client.FindPool() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestClient_CreatePool(t *testing.T) { + t.Parallel() + type args struct { + ctx context.Context + zone scw.Zone + clusterID string + name string + nodeType string + placementGroupID *string + securityGroupID *string + autoscaling bool + autohealing bool + publicIPDisabled bool + size uint32 + minSize *uint32 + maxSize *uint32 + tags []string + kubeletArgs map[string]string + rootVolumeType k8s.PoolVolumeType + rootVolumeSizeGB *uint64 + upgradePolicy *k8s.CreatePoolRequestUpgradePolicy + } + tests := []struct { + name string + args args + want *k8s.Pool + wantErr bool + expect func(d *mock_client.MockK8sAPIMockRecorder) + }{ + { + name: "create pool", + args: args{ + ctx: context.TODO(), + zone: scw.ZoneFrPar1, + clusterID: clusterID, + name: "mypool", + nodeType: "DEV1-S", + placementGroupID: scw.StringPtr(placementGroupID), + securityGroupID: scw.StringPtr(securityGroupID), + autoscaling: true, + autohealing: true, + publicIPDisabled: true, + size: 1, + minSize: scw.Uint32Ptr(1), + maxSize: scw.Uint32Ptr(5), + tags: []string{"tag1", "tag2"}, + kubeletArgs: map[string]string{ + "containerLogMaxFiles": "100", + "maxParallelImagePulls": "5", + }, + rootVolumeType: k8s.PoolVolumeTypeBSSD, + rootVolumeSizeGB: scw.Uint64Ptr(30), + upgradePolicy: &k8s.CreatePoolRequestUpgradePolicy{ + MaxUnavailable: scw.Uint32Ptr(0), + MaxSurge: scw.Uint32Ptr(1), + }, + }, + want: &k8s.Pool{ + ID: poolID, + Name: "mypool", + }, + expect: func(d *mock_client.MockK8sAPIMockRecorder) { + d.CreatePool(&k8s.CreatePoolRequest{ + ClusterID: clusterID, + Name: "mypool", + NodeType: "DEV1-S", + PlacementGroupID: scw.StringPtr(placementGroupID), + Autoscaling: true, + Size: 1, + MinSize: scw.Uint32Ptr(1), + MaxSize: scw.Uint32Ptr(5), + Autohealing: true, + Tags: []string{"tag1", "tag2", createdByTag}, + KubeletArgs: map[string]string{ + "containerLogMaxFiles": "100", + "maxParallelImagePulls": "5", + }, + UpgradePolicy: &k8s.CreatePoolRequestUpgradePolicy{ + MaxUnavailable: scw.Uint32Ptr(0), + MaxSurge: scw.Uint32Ptr(1), + }, + Zone: scw.ZoneFrPar1, + RootVolumeType: k8s.PoolVolumeTypeBSSD, + RootVolumeSize: scw.SizePtr(30 * scw.GB), + PublicIPDisabled: true, + SecurityGroupID: scw.StringPtr(securityGroupID), + }, gomock.Any()).Return(&k8s.Pool{ + ID: poolID, + Name: "mypool", + }, nil) + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + mockCtrl := gomock.NewController(t) + defer mockCtrl.Finish() + + k8sMock := mock_client.NewMockK8sAPI(mockCtrl) + + tt.expect(k8sMock.EXPECT()) + + c := &Client{ + k8s: k8sMock, + } + got, err := c.CreatePool(tt.args.ctx, tt.args.zone, tt.args.clusterID, tt.args.name, tt.args.nodeType, tt.args.placementGroupID, tt.args.securityGroupID, tt.args.autoscaling, tt.args.autohealing, tt.args.publicIPDisabled, tt.args.size, tt.args.minSize, tt.args.maxSize, tt.args.tags, tt.args.kubeletArgs, tt.args.rootVolumeType, tt.args.rootVolumeSizeGB, tt.args.upgradePolicy) + if (err != nil) != tt.wantErr { + t.Errorf("Client.CreatePool() error = %v, wantErr %v", err, tt.wantErr) + return + } + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("Client.CreatePool() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestClient_UpdatePool(t *testing.T) { + t.Parallel() + type args struct { + ctx context.Context + id 
string + autoscaling *bool + autohealing *bool + size *uint32 + minSize *uint32 + maxSize *uint32 + tags *[]string + kubeletArgs *map[string]string + upgradePolicy *k8s.UpdatePoolRequestUpgradePolicy + } + tests := []struct { + name string + args args + wantErr bool + expect func(d *mock_client.MockK8sAPIMockRecorder) + }{ + { + name: "update pool", + args: args{ + ctx: context.TODO(), + id: poolID, + autoscaling: scw.BoolPtr(true), + autohealing: scw.BoolPtr(true), + size: scw.Uint32Ptr(1), + minSize: scw.Uint32Ptr(1), + maxSize: scw.Uint32Ptr(5), + tags: &[]string{"tag1", "tag2"}, + kubeletArgs: &map[string]string{ + "containerLogMaxFiles": "100", + "maxParallelImagePulls": "5", + }, + upgradePolicy: &k8s.UpdatePoolRequestUpgradePolicy{ + MaxUnavailable: scw.Uint32Ptr(0), + MaxSurge: scw.Uint32Ptr(1), + }, + }, + expect: func(d *mock_client.MockK8sAPIMockRecorder) { + d.UpdatePool(&k8s.UpdatePoolRequest{ + PoolID: poolID, + Autoscaling: scw.BoolPtr(true), + Autohealing: scw.BoolPtr(true), + Size: scw.Uint32Ptr(1), + MinSize: scw.Uint32Ptr(1), + MaxSize: scw.Uint32Ptr(5), + Tags: &[]string{"tag1", "tag2", createdByTag}, + KubeletArgs: &map[string]string{ + "containerLogMaxFiles": "100", + "maxParallelImagePulls": "5", + }, + UpgradePolicy: &k8s.UpdatePoolRequestUpgradePolicy{ + MaxUnavailable: scw.Uint32Ptr(0), + MaxSurge: scw.Uint32Ptr(1), + }, + }, gomock.Any()).Return(&k8s.Pool{}, nil) + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + mockCtrl := gomock.NewController(t) + defer mockCtrl.Finish() + + k8sMock := mock_client.NewMockK8sAPI(mockCtrl) + + tt.expect(k8sMock.EXPECT()) + + c := &Client{ + k8s: k8sMock, + } + if err := c.UpdatePool(tt.args.ctx, tt.args.id, tt.args.autoscaling, tt.args.autohealing, tt.args.size, tt.args.minSize, tt.args.maxSize, tt.args.tags, tt.args.kubeletArgs, tt.args.upgradePolicy); (err != nil) != tt.wantErr { + t.Errorf("Client.UpdatePool() error = %v, wantErr %v", err, tt.wantErr) + } + }) + } +} + +func TestClient_UpgradePool(t *testing.T) { + t.Parallel() + type args struct { + ctx context.Context + id string + version string + } + tests := []struct { + name string + args args + wantErr bool + expect func(d *mock_client.MockK8sAPIMockRecorder) + }{ + { + name: "upgrade pool", + args: args{ + ctx: context.TODO(), + id: poolID, + version: "1.31.1", + }, + expect: func(d *mock_client.MockK8sAPIMockRecorder) { + d.UpgradePool(&k8s.UpgradePoolRequest{ + PoolID: poolID, + Version: "1.31.1", + }, gomock.Any()).Return(&k8s.Pool{}, nil) + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + mockCtrl := gomock.NewController(t) + defer mockCtrl.Finish() + + k8sMock := mock_client.NewMockK8sAPI(mockCtrl) + + tt.expect(k8sMock.EXPECT()) + + c := &Client{ + k8s: k8sMock, + } + if err := c.UpgradePool(tt.args.ctx, tt.args.id, tt.args.version); (err != nil) != tt.wantErr { + t.Errorf("Client.UpgradePool() error = %v, wantErr %v", err, tt.wantErr) + } + }) + } +} + +func TestClient_DeletePool(t *testing.T) { + t.Parallel() + type args struct { + ctx context.Context + id string + } + tests := []struct { + name string + args args + wantErr bool + expect func(d *mock_client.MockK8sAPIMockRecorder) + }{ + { + name: "delete pool", + args: args{ + ctx: context.TODO(), + id: poolID, + }, + expect: func(d *mock_client.MockK8sAPIMockRecorder) { + d.DeletePool(&k8s.DeletePoolRequest{ + PoolID: poolID, + }, gomock.Any()).Return(&k8s.Pool{}, nil) + }, + }, + } + for _, tt := range tests { 
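+ // Each case runs in its own parallel subtest with a dedicated gomock controller, so recorded expectations cannot leak between cases. + // For orientation only: judging from the recorded expectation, the Client.DeletePool wrapper exercised here is assumed to forward roughly as + //   _, err := c.k8s.DeletePool(&k8s.DeletePoolRequest{PoolID: id}, scw.WithContext(ctx)); return err + // (sketch, not the actual implementation, which is defined elsewhere in the client package and is not part of this hunk).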
+ t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + mockCtrl := gomock.NewController(t) + defer mockCtrl.Finish() + + k8sMock := mock_client.NewMockK8sAPI(mockCtrl) + + tt.expect(k8sMock.EXPECT()) + + c := &Client{ + k8s: k8sMock, + } + if err := c.DeletePool(tt.args.ctx, tt.args.id); (err != nil) != tt.wantErr { + t.Errorf("Client.DeletePool() error = %v, wantErr %v", err, tt.wantErr) + } + }) + } +} + +func TestClient_ListNodes(t *testing.T) { + t.Parallel() + type args struct { + ctx context.Context + clusterID string + poolID string + } + tests := []struct { + name string + args args + want []*k8s.Node + wantErr bool + expect func(d *mock_client.MockK8sAPIMockRecorder) + }{ + { + name: "list nodes", + args: args{ + ctx: context.TODO(), + clusterID: clusterID, + poolID: poolID, + }, + want: []*k8s.Node{ + { + Name: "node1", + }, + { + Name: "node2", + }, + }, + expect: func(d *mock_client.MockK8sAPIMockRecorder) { + d.ListNodes(&k8s.ListNodesRequest{ + ClusterID: clusterID, + PoolID: scw.StringPtr(poolID), + }, gomock.Any(), gomock.Any()).Return(&k8s.ListNodesResponse{ + TotalCount: 2, + Nodes: []*k8s.Node{ + { + Name: "node1", + }, + { + Name: "node2", + }, + }, + }, nil) + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + mockCtrl := gomock.NewController(t) + defer mockCtrl.Finish() + + k8sMock := mock_client.NewMockK8sAPI(mockCtrl) + + tt.expect(k8sMock.EXPECT()) + + c := &Client{ + k8s: k8sMock, + } + got, err := c.ListNodes(tt.args.ctx, tt.args.clusterID, tt.args.poolID) + if (err != nil) != tt.wantErr { + t.Errorf("Client.ListNodes() error = %v, wantErr %v", err, tt.wantErr) + return + } + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("Client.ListNodes() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestClient_ListClusterACLRules(t *testing.T) { + t.Parallel() + type args struct { + ctx context.Context + clusterID string + } + tests := []struct { + name string + args args + want []*k8s.ACLRule + wantErr bool + expect func(d *mock_client.MockK8sAPIMockRecorder) + }{ + { + name: "list cluster acls", + args: args{ + ctx: context.TODO(), + clusterID: clusterID, + }, + want: []*k8s.ACLRule{ + { + ID: aclID1, + }, + { + ID: aclID2, + }, + }, + expect: func(d *mock_client.MockK8sAPIMockRecorder) { + d.ListClusterACLRules(&k8s.ListClusterACLRulesRequest{ + ClusterID: clusterID, + }, gomock.Any(), gomock.Any()).Return(&k8s.ListClusterACLRulesResponse{ + TotalCount: 2, + Rules: []*k8s.ACLRule{ + { + ID: aclID1, + }, + { + ID: aclID2, + }, + }, + }, nil) + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + mockCtrl := gomock.NewController(t) + defer mockCtrl.Finish() + + k8sMock := mock_client.NewMockK8sAPI(mockCtrl) + + tt.expect(k8sMock.EXPECT()) + + c := &Client{ + k8s: k8sMock, + } + got, err := c.ListClusterACLRules(tt.args.ctx, tt.args.clusterID) + if (err != nil) != tt.wantErr { + t.Errorf("Client.ListClusterACLRules() error = %v, wantErr %v", err, tt.wantErr) + return + } + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("Client.ListClusterACLRules() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestClient_SetClusterACLRules(t *testing.T) { + t.Parallel() + type args struct { + ctx context.Context + clusterID string + rules []*k8s.ACLRuleRequest + } + tests := []struct { + name string + args args + wantErr bool + expect func(d *mock_client.MockK8sAPIMockRecorder) + }{ + { + name: "set cluster acls", + args: args{ + ctx: context.TODO(), + clusterID: clusterID, + 
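// The wrapper is expected to stamp every submitted ACL rule with the provider-managed createdByDescription, as asserted in the expectation below. +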
rules: []*k8s.ACLRuleRequest{ + { + ScalewayRanges: scw.BoolPtr(true), + }, + { + IP: &scw.IPNet{IPNet: net.IPNet{IP: net.IPv4(0, 0, 0, 0), Mask: net.IPv4Mask(0, 0, 0, 0)}}, + }, + }, + }, + expect: func(d *mock_client.MockK8sAPIMockRecorder) { + d.SetClusterACLRules(&k8s.SetClusterACLRulesRequest{ + ClusterID: clusterID, + ACLs: []*k8s.ACLRuleRequest{ + { + ScalewayRanges: scw.BoolPtr(true), + Description: createdByDescription, + }, + { + IP: &scw.IPNet{IPNet: net.IPNet{IP: net.IPv4(0, 0, 0, 0), Mask: net.IPv4Mask(0, 0, 0, 0)}}, + Description: createdByDescription, + }, + }, + }, gomock.Any()).Return(&k8s.SetClusterACLRulesResponse{}, nil) + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + mockCtrl := gomock.NewController(t) + defer mockCtrl.Finish() + + k8sMock := mock_client.NewMockK8sAPI(mockCtrl) + + tt.expect(k8sMock.EXPECT()) + + c := &Client{ + k8s: k8sMock, + } + if err := c.SetClusterACLRules(tt.args.ctx, tt.args.clusterID, tt.args.rules); (err != nil) != tt.wantErr { + t.Errorf("Client.SetClusterACLRules() error = %v, wantErr %v", err, tt.wantErr) + } + }) + } +} diff --git a/internal/service/scaleway/client/mock_client/client_mock.go b/internal/service/scaleway/client/mock_client/client_mock.go index d8fba85..aa217d5 100644 --- a/internal/service/scaleway/client/mock_client/client_mock.go +++ b/internal/service/scaleway/client/mock_client/client_mock.go @@ -18,6 +18,7 @@ import ( domain "github.com/scaleway/scaleway-sdk-go/api/domain/v2beta1" instance "github.com/scaleway/scaleway-sdk-go/api/instance/v1" ipam "github.com/scaleway/scaleway-sdk-go/api/ipam/v1" + k8s "github.com/scaleway/scaleway-sdk-go/api/k8s/v1" lb "github.com/scaleway/scaleway-sdk-go/api/lb/v1" marketplace "github.com/scaleway/scaleway-sdk-go/api/marketplace/v2" vpc "github.com/scaleway/scaleway-sdk-go/api/vpc/v2" @@ -203,6 +204,45 @@ func (c *MockInterfaceCreateBackendCall) DoAndReturn(f func(context.Context, scw return c } +// CreateCluster mocks base method. +func (m *MockInterface) CreateCluster(ctx context.Context, name, clusterType, version string, pnID *string, tags, featureGates, admissionPlugins, apiServerCertSANs []string, cni k8s.CNI, autoscalerConfig *k8s.CreateClusterRequestAutoscalerConfig, autoUpgrade *k8s.CreateClusterRequestAutoUpgrade, openIDConnectConfig *k8s.CreateClusterRequestOpenIDConnectConfig, podCIDR, serviceCIDR scw.IPNet) (*k8s.Cluster, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CreateCluster", ctx, name, clusterType, version, pnID, tags, featureGates, admissionPlugins, apiServerCertSANs, cni, autoscalerConfig, autoUpgrade, openIDConnectConfig, podCIDR, serviceCIDR) + ret0, _ := ret[0].(*k8s.Cluster) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CreateCluster indicates an expected call of CreateCluster. 
+func (mr *MockInterfaceMockRecorder) CreateCluster(ctx, name, clusterType, version, pnID, tags, featureGates, admissionPlugins, apiServerCertSANs, cni, autoscalerConfig, autoUpgrade, openIDConnectConfig, podCIDR, serviceCIDR any) *MockInterfaceCreateClusterCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateCluster", reflect.TypeOf((*MockInterface)(nil).CreateCluster), ctx, name, clusterType, version, pnID, tags, featureGates, admissionPlugins, apiServerCertSANs, cni, autoscalerConfig, autoUpgrade, openIDConnectConfig, podCIDR, serviceCIDR) + return &MockInterfaceCreateClusterCall{Call: call} +} + +// MockInterfaceCreateClusterCall wrap *gomock.Call +type MockInterfaceCreateClusterCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockInterfaceCreateClusterCall) Return(arg0 *k8s.Cluster, arg1 error) *MockInterfaceCreateClusterCall { + c.Call = c.Call.Return(arg0, arg1) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockInterfaceCreateClusterCall) Do(f func(context.Context, string, string, string, *string, []string, []string, []string, []string, k8s.CNI, *k8s.CreateClusterRequestAutoscalerConfig, *k8s.CreateClusterRequestAutoUpgrade, *k8s.CreateClusterRequestOpenIDConnectConfig, scw.IPNet, scw.IPNet) (*k8s.Cluster, error)) *MockInterfaceCreateClusterCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockInterfaceCreateClusterCall) DoAndReturn(f func(context.Context, string, string, string, *string, []string, []string, []string, []string, k8s.CNI, *k8s.CreateClusterRequestAutoscalerConfig, *k8s.CreateClusterRequestAutoUpgrade, *k8s.CreateClusterRequestOpenIDConnectConfig, scw.IPNet, scw.IPNet) (*k8s.Cluster, error)) *MockInterfaceCreateClusterCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + // CreateFrontend mocks base method. func (m *MockInterface) CreateFrontend(ctx context.Context, zone scw.Zone, lbID, name, backendID string, port int32) (*lb.Frontend, error) { m.ctrl.T.Helper() @@ -435,6 +475,45 @@ func (c *MockInterfaceCreateLBACLCall) DoAndReturn(f func(context.Context, scw.Z return c } +// CreatePool mocks base method. +func (m *MockInterface) CreatePool(ctx context.Context, zone scw.Zone, clusterID, name, nodeType string, placementGroupID, securityGroupID *string, autoscaling, autohealing, publicIPDisabled bool, size uint32, minSize, maxSize *uint32, tags []string, kubeletArgs map[string]string, rootVolumeType k8s.PoolVolumeType, rootVolumeSizeGB *uint64, upgradePolicy *k8s.CreatePoolRequestUpgradePolicy) (*k8s.Pool, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CreatePool", ctx, zone, clusterID, name, nodeType, placementGroupID, securityGroupID, autoscaling, autohealing, publicIPDisabled, size, minSize, maxSize, tags, kubeletArgs, rootVolumeType, rootVolumeSizeGB, upgradePolicy) + ret0, _ := ret[0].(*k8s.Pool) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CreatePool indicates an expected call of CreatePool. 
+func (mr *MockInterfaceMockRecorder) CreatePool(ctx, zone, clusterID, name, nodeType, placementGroupID, securityGroupID, autoscaling, autohealing, publicIPDisabled, size, minSize, maxSize, tags, kubeletArgs, rootVolumeType, rootVolumeSizeGB, upgradePolicy any) *MockInterfaceCreatePoolCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreatePool", reflect.TypeOf((*MockInterface)(nil).CreatePool), ctx, zone, clusterID, name, nodeType, placementGroupID, securityGroupID, autoscaling, autohealing, publicIPDisabled, size, minSize, maxSize, tags, kubeletArgs, rootVolumeType, rootVolumeSizeGB, upgradePolicy) + return &MockInterfaceCreatePoolCall{Call: call} +} + +// MockInterfaceCreatePoolCall wrap *gomock.Call +type MockInterfaceCreatePoolCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockInterfaceCreatePoolCall) Return(arg0 *k8s.Pool, arg1 error) *MockInterfaceCreatePoolCall { + c.Call = c.Call.Return(arg0, arg1) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockInterfaceCreatePoolCall) Do(f func(context.Context, scw.Zone, string, string, string, *string, *string, bool, bool, bool, uint32, *uint32, *uint32, []string, map[string]string, k8s.PoolVolumeType, *uint64, *k8s.CreatePoolRequestUpgradePolicy) (*k8s.Pool, error)) *MockInterfaceCreatePoolCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockInterfaceCreatePoolCall) DoAndReturn(f func(context.Context, scw.Zone, string, string, string, *string, *string, bool, bool, bool, uint32, *uint32, *uint32, []string, map[string]string, k8s.PoolVolumeType, *uint64, *k8s.CreatePoolRequestUpgradePolicy) (*k8s.Pool, error)) *MockInterfaceCreatePoolCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + // CreatePrivateNIC mocks base method. func (m *MockInterface) CreatePrivateNIC(ctx context.Context, zone scw.Zone, serverID, privateNetworkID string) (*instance.PrivateNIC, error) { m.ctrl.T.Helper() @@ -590,6 +669,44 @@ func (c *MockInterfaceDefaultZoneCall) DoAndReturn(f func() scw.Zone) *MockInter return c } +// DeleteCluster mocks base method. +func (m *MockInterface) DeleteCluster(ctx context.Context, id string, withAdditionalResources bool) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteCluster", ctx, id, withAdditionalResources) + ret0, _ := ret[0].(error) + return ret0 +} + +// DeleteCluster indicates an expected call of DeleteCluster. 
+func (mr *MockInterfaceMockRecorder) DeleteCluster(ctx, id, withAdditionalResources any) *MockInterfaceDeleteClusterCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteCluster", reflect.TypeOf((*MockInterface)(nil).DeleteCluster), ctx, id, withAdditionalResources) + return &MockInterfaceDeleteClusterCall{Call: call} +} + +// MockInterfaceDeleteClusterCall wrap *gomock.Call +type MockInterfaceDeleteClusterCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockInterfaceDeleteClusterCall) Return(arg0 error) *MockInterfaceDeleteClusterCall { + c.Call = c.Call.Return(arg0) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockInterfaceDeleteClusterCall) Do(f func(context.Context, string, bool) error) *MockInterfaceDeleteClusterCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockInterfaceDeleteClusterCall) DoAndReturn(f func(context.Context, string, bool) error) *MockInterfaceDeleteClusterCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + // DeleteDNSZoneRecords mocks base method. func (m *MockInterface) DeleteDNSZoneRecords(ctx context.Context, zone, name string) error { m.ctrl.T.Helper() @@ -818,6 +935,44 @@ func (c *MockInterfaceDeleteLBACLCall) DoAndReturn(f func(context.Context, scw.Z return c } +// DeletePool mocks base method. +func (m *MockInterface) DeletePool(ctx context.Context, id string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeletePool", ctx, id) + ret0, _ := ret[0].(error) + return ret0 +} + +// DeletePool indicates an expected call of DeletePool. +func (mr *MockInterfaceMockRecorder) DeletePool(ctx, id any) *MockInterfaceDeletePoolCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeletePool", reflect.TypeOf((*MockInterface)(nil).DeletePool), ctx, id) + return &MockInterfaceDeletePoolCall{Call: call} +} + +// MockInterfaceDeletePoolCall wrap *gomock.Call +type MockInterfaceDeletePoolCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockInterfaceDeletePoolCall) Return(arg0 error) *MockInterfaceDeletePoolCall { + c.Call = c.Call.Return(arg0) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockInterfaceDeletePoolCall) Do(f func(context.Context, string) error) *MockInterfaceDeletePoolCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockInterfaceDeletePoolCall) DoAndReturn(f func(context.Context, string) error) *MockInterfaceDeletePoolCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + // DeletePrivateNetwork mocks base method. func (m *MockInterface) DeletePrivateNetwork(ctx context.Context, id string) error { m.ctrl.T.Helper() @@ -1086,6 +1241,45 @@ func (c *MockInterfaceFindBackendCall) DoAndReturn(f func(context.Context, scw.Z return c } +// FindCluster mocks base method. +func (m *MockInterface) FindCluster(ctx context.Context, name string) (*k8s.Cluster, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "FindCluster", ctx, name) + ret0, _ := ret[0].(*k8s.Cluster) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// FindCluster indicates an expected call of FindCluster. 
+func (mr *MockInterfaceMockRecorder) FindCluster(ctx, name any) *MockInterfaceFindClusterCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FindCluster", reflect.TypeOf((*MockInterface)(nil).FindCluster), ctx, name) + return &MockInterfaceFindClusterCall{Call: call} +} + +// MockInterfaceFindClusterCall wrap *gomock.Call +type MockInterfaceFindClusterCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockInterfaceFindClusterCall) Return(arg0 *k8s.Cluster, arg1 error) *MockInterfaceFindClusterCall { + c.Call = c.Call.Return(arg0, arg1) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockInterfaceFindClusterCall) Do(f func(context.Context, string) (*k8s.Cluster, error)) *MockInterfaceFindClusterCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockInterfaceFindClusterCall) DoAndReturn(f func(context.Context, string) (*k8s.Cluster, error)) *MockInterfaceFindClusterCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + // FindFrontend mocks base method. func (m *MockInterface) FindFrontend(ctx context.Context, zone scw.Zone, lbID, name string) (*lb.Frontend, error) { m.ctrl.T.Helper() @@ -1593,6 +1787,45 @@ func (c *MockInterfaceFindPlacementGroupCall) DoAndReturn(f func(context.Context return c } +// FindPool mocks base method. +func (m *MockInterface) FindPool(ctx context.Context, clusterID, name string) (*k8s.Pool, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "FindPool", ctx, clusterID, name) + ret0, _ := ret[0].(*k8s.Pool) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// FindPool indicates an expected call of FindPool. +func (mr *MockInterfaceMockRecorder) FindPool(ctx, clusterID, name any) *MockInterfaceFindPoolCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FindPool", reflect.TypeOf((*MockInterface)(nil).FindPool), ctx, clusterID, name) + return &MockInterfaceFindPoolCall{Call: call} +} + +// MockInterfaceFindPoolCall wrap *gomock.Call +type MockInterfaceFindPoolCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockInterfaceFindPoolCall) Return(arg0 *k8s.Pool, arg1 error) *MockInterfaceFindPoolCall { + c.Call = c.Call.Return(arg0, arg1) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockInterfaceFindPoolCall) Do(f func(context.Context, string, string) (*k8s.Pool, error)) *MockInterfaceFindPoolCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockInterfaceFindPoolCall) DoAndReturn(f func(context.Context, string, string) (*k8s.Pool, error)) *MockInterfaceFindPoolCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + // FindPrivateNICIPs mocks base method. func (m *MockInterface) FindPrivateNICIPs(ctx context.Context, privateNICID string) ([]*ipam.IP, error) { m.ctrl.T.Helper() @@ -1827,6 +2060,45 @@ func (c *MockInterfaceGetAllServerUserDataCall) DoAndReturn(f func(context.Conte return c } +// GetClusterKubeConfig mocks base method. +func (m *MockInterface) GetClusterKubeConfig(ctx context.Context, id string) (*k8s.Kubeconfig, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetClusterKubeConfig", ctx, id) + ret0, _ := ret[0].(*k8s.Kubeconfig) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetClusterKubeConfig indicates an expected call of GetClusterKubeConfig. 
+func (mr *MockInterfaceMockRecorder) GetClusterKubeConfig(ctx, id any) *MockInterfaceGetClusterKubeConfigCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetClusterKubeConfig", reflect.TypeOf((*MockInterface)(nil).GetClusterKubeConfig), ctx, id) + return &MockInterfaceGetClusterKubeConfigCall{Call: call} +} + +// MockInterfaceGetClusterKubeConfigCall wrap *gomock.Call +type MockInterfaceGetClusterKubeConfigCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockInterfaceGetClusterKubeConfigCall) Return(arg0 *k8s.Kubeconfig, arg1 error) *MockInterfaceGetClusterKubeConfigCall { + c.Call = c.Call.Return(arg0, arg1) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockInterfaceGetClusterKubeConfigCall) Do(f func(context.Context, string) (*k8s.Kubeconfig, error)) *MockInterfaceGetClusterKubeConfigCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockInterfaceGetClusterKubeConfigCall) DoAndReturn(f func(context.Context, string) (*k8s.Kubeconfig, error)) *MockInterfaceGetClusterKubeConfigCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + // GetControlPlaneZones mocks base method. func (m *MockInterface) GetControlPlaneZones() []scw.Zone { m.ctrl.T.Helper() @@ -1943,6 +2215,44 @@ func (c *MockInterfaceGetPrivateNetworkCall) DoAndReturn(f func(context.Context, return c } +// GetSecretKey mocks base method. +func (m *MockInterface) GetSecretKey() string { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetSecretKey") + ret0, _ := ret[0].(string) + return ret0 +} + +// GetSecretKey indicates an expected call of GetSecretKey. +func (mr *MockInterfaceMockRecorder) GetSecretKey() *MockInterfaceGetSecretKeyCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSecretKey", reflect.TypeOf((*MockInterface)(nil).GetSecretKey)) + return &MockInterfaceGetSecretKeyCall{Call: call} +} + +// MockInterfaceGetSecretKeyCall wrap *gomock.Call +type MockInterfaceGetSecretKeyCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockInterfaceGetSecretKeyCall) Return(arg0 string) *MockInterfaceGetSecretKeyCall { + c.Call = c.Call.Return(arg0) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockInterfaceGetSecretKeyCall) Do(f func() string) *MockInterfaceGetSecretKeyCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockInterfaceGetSecretKeyCall) DoAndReturn(f func() string) *MockInterfaceGetSecretKeyCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + // GetZoneOrDefault mocks base method. func (m *MockInterface) GetZoneOrDefault(zone *string) (scw.Zone, error) { m.ctrl.T.Helper() @@ -1982,6 +2292,45 @@ func (c *MockInterfaceGetZoneOrDefaultCall) DoAndReturn(f func(*string) (scw.Zon return c } +// ListClusterACLRules mocks base method. +func (m *MockInterface) ListClusterACLRules(ctx context.Context, clusterID string) ([]*k8s.ACLRule, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListClusterACLRules", ctx, clusterID) + ret0, _ := ret[0].([]*k8s.ACLRule) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListClusterACLRules indicates an expected call of ListClusterACLRules. 
+func (mr *MockInterfaceMockRecorder) ListClusterACLRules(ctx, clusterID any) *MockInterfaceListClusterACLRulesCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListClusterACLRules", reflect.TypeOf((*MockInterface)(nil).ListClusterACLRules), ctx, clusterID) + return &MockInterfaceListClusterACLRulesCall{Call: call} +} + +// MockInterfaceListClusterACLRulesCall wrap *gomock.Call +type MockInterfaceListClusterACLRulesCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockInterfaceListClusterACLRulesCall) Return(arg0 []*k8s.ACLRule, arg1 error) *MockInterfaceListClusterACLRulesCall { + c.Call = c.Call.Return(arg0, arg1) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockInterfaceListClusterACLRulesCall) Do(f func(context.Context, string) ([]*k8s.ACLRule, error)) *MockInterfaceListClusterACLRulesCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockInterfaceListClusterACLRulesCall) DoAndReturn(f func(context.Context, string) ([]*k8s.ACLRule, error)) *MockInterfaceListClusterACLRulesCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + // ListDNSZoneRecords mocks base method. func (m *MockInterface) ListDNSZoneRecords(ctx context.Context, zone, name string) ([]*domain.Record, error) { m.ctrl.T.Helper() @@ -2099,6 +2448,45 @@ func (c *MockInterfaceListLBACLsCall) DoAndReturn(f func(context.Context, scw.Zo return c } +// ListNodes mocks base method. +func (m *MockInterface) ListNodes(ctx context.Context, clusterID, poolID string) ([]*k8s.Node, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListNodes", ctx, clusterID, poolID) + ret0, _ := ret[0].([]*k8s.Node) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListNodes indicates an expected call of ListNodes. +func (mr *MockInterfaceMockRecorder) ListNodes(ctx, clusterID, poolID any) *MockInterfaceListNodesCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListNodes", reflect.TypeOf((*MockInterface)(nil).ListNodes), ctx, clusterID, poolID) + return &MockInterfaceListNodesCall{Call: call} +} + +// MockInterfaceListNodesCall wrap *gomock.Call +type MockInterfaceListNodesCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockInterfaceListNodesCall) Return(arg0 []*k8s.Node, arg1 error) *MockInterfaceListNodesCall { + c.Call = c.Call.Return(arg0, arg1) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockInterfaceListNodesCall) Do(f func(context.Context, string, string) ([]*k8s.Node, error)) *MockInterfaceListNodesCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockInterfaceListNodesCall) DoAndReturn(f func(context.Context, string, string) ([]*k8s.Node, error)) *MockInterfaceListNodesCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + // MigrateLB mocks base method. func (m *MockInterface) MigrateLB(ctx context.Context, zone scw.Zone, id, newType string) (*lb.LB, error) { m.ctrl.T.Helper() @@ -2253,6 +2641,82 @@ func (c *MockInterfaceSetBackendServersCall) DoAndReturn(f func(context.Context, return c } +// SetClusterACLRules mocks base method. 
+func (m *MockInterface) SetClusterACLRules(ctx context.Context, clusterID string, rules []*k8s.ACLRuleRequest) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SetClusterACLRules", ctx, clusterID, rules) + ret0, _ := ret[0].(error) + return ret0 +} + +// SetClusterACLRules indicates an expected call of SetClusterACLRules. +func (mr *MockInterfaceMockRecorder) SetClusterACLRules(ctx, clusterID, rules any) *MockInterfaceSetClusterACLRulesCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetClusterACLRules", reflect.TypeOf((*MockInterface)(nil).SetClusterACLRules), ctx, clusterID, rules) + return &MockInterfaceSetClusterACLRulesCall{Call: call} +} + +// MockInterfaceSetClusterACLRulesCall wrap *gomock.Call +type MockInterfaceSetClusterACLRulesCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockInterfaceSetClusterACLRulesCall) Return(arg0 error) *MockInterfaceSetClusterACLRulesCall { + c.Call = c.Call.Return(arg0) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockInterfaceSetClusterACLRulesCall) Do(f func(context.Context, string, []*k8s.ACLRuleRequest) error) *MockInterfaceSetClusterACLRulesCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockInterfaceSetClusterACLRulesCall) DoAndReturn(f func(context.Context, string, []*k8s.ACLRuleRequest) error) *MockInterfaceSetClusterACLRulesCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// SetClusterType mocks base method. +func (m *MockInterface) SetClusterType(ctx context.Context, id, clusterType string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SetClusterType", ctx, id, clusterType) + ret0, _ := ret[0].(error) + return ret0 +} + +// SetClusterType indicates an expected call of SetClusterType. +func (mr *MockInterfaceMockRecorder) SetClusterType(ctx, id, clusterType any) *MockInterfaceSetClusterTypeCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetClusterType", reflect.TypeOf((*MockInterface)(nil).SetClusterType), ctx, id, clusterType) + return &MockInterfaceSetClusterTypeCall{Call: call} +} + +// MockInterfaceSetClusterTypeCall wrap *gomock.Call +type MockInterfaceSetClusterTypeCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockInterfaceSetClusterTypeCall) Return(arg0 error) *MockInterfaceSetClusterTypeCall { + c.Call = c.Call.Return(arg0) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockInterfaceSetClusterTypeCall) Do(f func(context.Context, string, string) error) *MockInterfaceSetClusterTypeCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockInterfaceSetClusterTypeCall) DoAndReturn(f func(context.Context, string, string) error) *MockInterfaceSetClusterTypeCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + // SetDNSZoneRecords mocks base method. func (m *MockInterface) SetDNSZoneRecords(ctx context.Context, zone, name string, ips []string) error { m.ctrl.T.Helper() @@ -2367,6 +2831,44 @@ func (c *MockInterfaceSetServerUserDataCall) DoAndReturn(f func(context.Context, return c } +// UpdateCluster mocks base method. 
+func (m *MockInterface) UpdateCluster(ctx context.Context, id string, tags, featureGates, admissionPlugins, apiServerCertSANs *[]string, autoscalerConfig *k8s.UpdateClusterRequestAutoscalerConfig, autoUpgrade *k8s.UpdateClusterRequestAutoUpgrade, openIDConnectConfig *k8s.UpdateClusterRequestOpenIDConnectConfig) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UpdateCluster", ctx, id, tags, featureGates, admissionPlugins, apiServerCertSANs, autoscalerConfig, autoUpgrade, openIDConnectConfig) + ret0, _ := ret[0].(error) + return ret0 +} + +// UpdateCluster indicates an expected call of UpdateCluster. +func (mr *MockInterfaceMockRecorder) UpdateCluster(ctx, id, tags, featureGates, admissionPlugins, apiServerCertSANs, autoscalerConfig, autoUpgrade, openIDConnectConfig any) *MockInterfaceUpdateClusterCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateCluster", reflect.TypeOf((*MockInterface)(nil).UpdateCluster), ctx, id, tags, featureGates, admissionPlugins, apiServerCertSANs, autoscalerConfig, autoUpgrade, openIDConnectConfig) + return &MockInterfaceUpdateClusterCall{Call: call} +} + +// MockInterfaceUpdateClusterCall wrap *gomock.Call +type MockInterfaceUpdateClusterCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockInterfaceUpdateClusterCall) Return(arg0 error) *MockInterfaceUpdateClusterCall { + c.Call = c.Call.Return(arg0) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockInterfaceUpdateClusterCall) Do(f func(context.Context, string, *[]string, *[]string, *[]string, *[]string, *k8s.UpdateClusterRequestAutoscalerConfig, *k8s.UpdateClusterRequestAutoUpgrade, *k8s.UpdateClusterRequestOpenIDConnectConfig) error) *MockInterfaceUpdateClusterCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockInterfaceUpdateClusterCall) DoAndReturn(f func(context.Context, string, *[]string, *[]string, *[]string, *[]string, *k8s.UpdateClusterRequestAutoscalerConfig, *k8s.UpdateClusterRequestAutoUpgrade, *k8s.UpdateClusterRequestOpenIDConnectConfig) error) *MockInterfaceUpdateClusterCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + // UpdateInstanceVolumeTags mocks base method. func (m *MockInterface) UpdateInstanceVolumeTags(ctx context.Context, zone scw.Zone, volumeID string, tags []string) error { m.ctrl.T.Helper() @@ -2443,6 +2945,44 @@ func (c *MockInterfaceUpdateLBACLCall) DoAndReturn(f func(context.Context, scw.Z return c } +// UpdatePool mocks base method. +func (m *MockInterface) UpdatePool(ctx context.Context, id string, autoscaling, autohealing *bool, size, minSize, maxSize *uint32, tags *[]string, kubeletArgs *map[string]string, upgradePolicy *k8s.UpdatePoolRequestUpgradePolicy) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UpdatePool", ctx, id, autoscaling, autohealing, size, minSize, maxSize, tags, kubeletArgs, upgradePolicy) + ret0, _ := ret[0].(error) + return ret0 +} + +// UpdatePool indicates an expected call of UpdatePool. 
+func (mr *MockInterfaceMockRecorder) UpdatePool(ctx, id, autoscaling, autohealing, size, minSize, maxSize, tags, kubeletArgs, upgradePolicy any) *MockInterfaceUpdatePoolCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdatePool", reflect.TypeOf((*MockInterface)(nil).UpdatePool), ctx, id, autoscaling, autohealing, size, minSize, maxSize, tags, kubeletArgs, upgradePolicy) + return &MockInterfaceUpdatePoolCall{Call: call} +} + +// MockInterfaceUpdatePoolCall wrap *gomock.Call +type MockInterfaceUpdatePoolCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockInterfaceUpdatePoolCall) Return(arg0 error) *MockInterfaceUpdatePoolCall { + c.Call = c.Call.Return(arg0) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockInterfaceUpdatePoolCall) Do(f func(context.Context, string, *bool, *bool, *uint32, *uint32, *uint32, *[]string, *map[string]string, *k8s.UpdatePoolRequestUpgradePolicy) error) *MockInterfaceUpdatePoolCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockInterfaceUpdatePoolCall) DoAndReturn(f func(context.Context, string, *bool, *bool, *uint32, *uint32, *uint32, *[]string, *map[string]string, *k8s.UpdatePoolRequestUpgradePolicy) error) *MockInterfaceUpdatePoolCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + // UpdateServerPublicIPs mocks base method. func (m *MockInterface) UpdateServerPublicIPs(ctx context.Context, zone scw.Zone, id string, publicIPIDs []string) (*instance.Server, error) { m.ctrl.T.Helper() @@ -2558,6 +3098,44 @@ func (c *MockInterfaceUpdateVolumeTagsCall) DoAndReturn(f func(context.Context, return c } +// UpgradeCluster mocks base method. +func (m *MockInterface) UpgradeCluster(ctx context.Context, id, version string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UpgradeCluster", ctx, id, version) + ret0, _ := ret[0].(error) + return ret0 +} + +// UpgradeCluster indicates an expected call of UpgradeCluster. +func (mr *MockInterfaceMockRecorder) UpgradeCluster(ctx, id, version any) *MockInterfaceUpgradeClusterCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpgradeCluster", reflect.TypeOf((*MockInterface)(nil).UpgradeCluster), ctx, id, version) + return &MockInterfaceUpgradeClusterCall{Call: call} +} + +// MockInterfaceUpgradeClusterCall wrap *gomock.Call +type MockInterfaceUpgradeClusterCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockInterfaceUpgradeClusterCall) Return(arg0 error) *MockInterfaceUpgradeClusterCall { + c.Call = c.Call.Return(arg0) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockInterfaceUpgradeClusterCall) Do(f func(context.Context, string, string) error) *MockInterfaceUpgradeClusterCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockInterfaceUpgradeClusterCall) DoAndReturn(f func(context.Context, string, string) error) *MockInterfaceUpgradeClusterCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + // UpgradeGateway mocks base method. func (m *MockInterface) UpgradeGateway(ctx context.Context, zone scw.Zone, gatewayID, newType string) (*vpcgw.Gateway, error) { m.ctrl.T.Helper() @@ -2596,3 +3174,41 @@ func (c *MockInterfaceUpgradeGatewayCall) DoAndReturn(f func(context.Context, sc c.Call = c.Call.DoAndReturn(f) return c } + +// UpgradePool mocks base method. 
+func (m *MockInterface) UpgradePool(ctx context.Context, id, version string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UpgradePool", ctx, id, version) + ret0, _ := ret[0].(error) + return ret0 +} + +// UpgradePool indicates an expected call of UpgradePool. +func (mr *MockInterfaceMockRecorder) UpgradePool(ctx, id, version any) *MockInterfaceUpgradePoolCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpgradePool", reflect.TypeOf((*MockInterface)(nil).UpgradePool), ctx, id, version) + return &MockInterfaceUpgradePoolCall{Call: call} +} + +// MockInterfaceUpgradePoolCall wrap *gomock.Call +type MockInterfaceUpgradePoolCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockInterfaceUpgradePoolCall) Return(arg0 error) *MockInterfaceUpgradePoolCall { + c.Call = c.Call.Return(arg0) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockInterfaceUpgradePoolCall) Do(f func(context.Context, string, string) error) *MockInterfaceUpgradePoolCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockInterfaceUpgradePoolCall) DoAndReturn(f func(context.Context, string, string) error) *MockInterfaceUpgradePoolCall { + c.Call = c.Call.DoAndReturn(f) + return c +} diff --git a/internal/service/scaleway/client/mock_client/config_mock.go b/internal/service/scaleway/client/mock_client/config_mock.go new file mode 100644 index 0000000..a292490 --- /dev/null +++ b/internal/service/scaleway/client/mock_client/config_mock.go @@ -0,0 +1,78 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: ../config.go +// +// Generated by this command: +// +// mockgen -destination config_mock.go -package mock_client -source ../config.go -typed +// + +// Package mock_client is a generated GoMock package. +package mock_client + +import ( + reflect "reflect" + + gomock "go.uber.org/mock/gomock" +) + +// MockConfig is a mock of Config interface. +type MockConfig struct { + ctrl *gomock.Controller + recorder *MockConfigMockRecorder + isgomock struct{} +} + +// MockConfigMockRecorder is the mock recorder for MockConfig. +type MockConfigMockRecorder struct { + mock *MockConfig +} + +// NewMockConfig creates a new mock instance. +func NewMockConfig(ctrl *gomock.Controller) *MockConfig { + mock := &MockConfig{ctrl: ctrl} + mock.recorder = &MockConfigMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockConfig) EXPECT() *MockConfigMockRecorder { + return m.recorder +} + +// GetSecretKey mocks base method. +func (m *MockConfig) GetSecretKey() string { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetSecretKey") + ret0, _ := ret[0].(string) + return ret0 +} + +// GetSecretKey indicates an expected call of GetSecretKey. 
+func (mr *MockConfigMockRecorder) GetSecretKey() *MockConfigGetSecretKeyCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSecretKey", reflect.TypeOf((*MockConfig)(nil).GetSecretKey)) + return &MockConfigGetSecretKeyCall{Call: call} +} + +// MockConfigGetSecretKeyCall wrap *gomock.Call +type MockConfigGetSecretKeyCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockConfigGetSecretKeyCall) Return(arg0 string) *MockConfigGetSecretKeyCall { + c.Call = c.Call.Return(arg0) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockConfigGetSecretKeyCall) Do(f func() string) *MockConfigGetSecretKeyCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockConfigGetSecretKeyCall) DoAndReturn(f func() string) *MockConfigGetSecretKeyCall { + c.Call = c.Call.DoAndReturn(f) + return c +} diff --git a/internal/service/scaleway/client/mock_client/doc.go b/internal/service/scaleway/client/mock_client/doc.go index 7840df9..a728a6b 100644 --- a/internal/service/scaleway/client/mock_client/doc.go +++ b/internal/service/scaleway/client/mock_client/doc.go @@ -1,8 +1,10 @@ -//go:generate ../../../../../bin/mockgen -destination client_mock.go -package mock_client -source ../interface.go -typed //go:generate ../../../../../bin/mockgen -destination block_mock.go -package mock_client -source ../block.go -typed +//go:generate ../../../../../bin/mockgen -destination client_mock.go -package mock_client -source ../interface.go -typed +//go:generate ../../../../../bin/mockgen -destination config_mock.go -package mock_client -source ../config.go -typed //go:generate ../../../../../bin/mockgen -destination domain_mock.go -package mock_client -source ../domain.go -typed //go:generate ../../../../../bin/mockgen -destination instance_mock.go -package mock_client -source ../instance.go -typed //go:generate ../../../../../bin/mockgen -destination ipam_mock.go -package mock_client -source ../ipam.go -typed +//go:generate ../../../../../bin/mockgen -destination k8s_mock.go -package mock_client -source ../k8s.go -typed //go:generate ../../../../../bin/mockgen -destination lb_mock.go -package mock_client -source ../lb.go -typed //go:generate ../../../../../bin/mockgen -destination marketplace_mock.go -package mock_client -source ../marketplace.go -typed //go:generate ../../../../../bin/mockgen -destination vpc_mock.go -package mock_client -source ../vpc.go -typed diff --git a/internal/service/scaleway/client/mock_client/k8s_mock.go b/internal/service/scaleway/client/mock_client/k8s_mock.go new file mode 100644 index 0000000..472b7a6 --- /dev/null +++ b/internal/service/scaleway/client/mock_client/k8s_mock.go @@ -0,0 +1,1304 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: ../k8s.go +// +// Generated by this command: +// +// mockgen -destination k8s_mock.go -package mock_client -source ../k8s.go -typed +// + +// Package mock_client is a generated GoMock package. +package mock_client + +import ( + context "context" + reflect "reflect" + + k8s "github.com/scaleway/scaleway-sdk-go/api/k8s/v1" + scw "github.com/scaleway/scaleway-sdk-go/scw" + gomock "go.uber.org/mock/gomock" +) + +// MockK8sAPI is a mock of K8sAPI interface. +type MockK8sAPI struct { + ctrl *gomock.Controller + recorder *MockK8sAPIMockRecorder + isgomock struct{} +} + +// MockK8sAPIMockRecorder is the mock recorder for MockK8sAPI. 
+type MockK8sAPIMockRecorder struct { + mock *MockK8sAPI +} + +// NewMockK8sAPI creates a new mock instance. +func NewMockK8sAPI(ctrl *gomock.Controller) *MockK8sAPI { + mock := &MockK8sAPI{ctrl: ctrl} + mock.recorder = &MockK8sAPIMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockK8sAPI) EXPECT() *MockK8sAPIMockRecorder { + return m.recorder +} + +// CreateCluster mocks base method. +func (m *MockK8sAPI) CreateCluster(req *k8s.CreateClusterRequest, opts ...scw.RequestOption) (*k8s.Cluster, error) { + m.ctrl.T.Helper() + varargs := []any{req} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "CreateCluster", varargs...) + ret0, _ := ret[0].(*k8s.Cluster) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CreateCluster indicates an expected call of CreateCluster. +func (mr *MockK8sAPIMockRecorder) CreateCluster(req any, opts ...any) *MockK8sAPICreateClusterCall { + mr.mock.ctrl.T.Helper() + varargs := append([]any{req}, opts...) + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateCluster", reflect.TypeOf((*MockK8sAPI)(nil).CreateCluster), varargs...) + return &MockK8sAPICreateClusterCall{Call: call} +} + +// MockK8sAPICreateClusterCall wrap *gomock.Call +type MockK8sAPICreateClusterCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockK8sAPICreateClusterCall) Return(arg0 *k8s.Cluster, arg1 error) *MockK8sAPICreateClusterCall { + c.Call = c.Call.Return(arg0, arg1) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockK8sAPICreateClusterCall) Do(f func(*k8s.CreateClusterRequest, ...scw.RequestOption) (*k8s.Cluster, error)) *MockK8sAPICreateClusterCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockK8sAPICreateClusterCall) DoAndReturn(f func(*k8s.CreateClusterRequest, ...scw.RequestOption) (*k8s.Cluster, error)) *MockK8sAPICreateClusterCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// CreatePool mocks base method. +func (m *MockK8sAPI) CreatePool(req *k8s.CreatePoolRequest, opts ...scw.RequestOption) (*k8s.Pool, error) { + m.ctrl.T.Helper() + varargs := []any{req} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "CreatePool", varargs...) + ret0, _ := ret[0].(*k8s.Pool) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CreatePool indicates an expected call of CreatePool. +func (mr *MockK8sAPIMockRecorder) CreatePool(req any, opts ...any) *MockK8sAPICreatePoolCall { + mr.mock.ctrl.T.Helper() + varargs := append([]any{req}, opts...) + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreatePool", reflect.TypeOf((*MockK8sAPI)(nil).CreatePool), varargs...) 
+ return &MockK8sAPICreatePoolCall{Call: call} +} + +// MockK8sAPICreatePoolCall wrap *gomock.Call +type MockK8sAPICreatePoolCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockK8sAPICreatePoolCall) Return(arg0 *k8s.Pool, arg1 error) *MockK8sAPICreatePoolCall { + c.Call = c.Call.Return(arg0, arg1) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockK8sAPICreatePoolCall) Do(f func(*k8s.CreatePoolRequest, ...scw.RequestOption) (*k8s.Pool, error)) *MockK8sAPICreatePoolCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockK8sAPICreatePoolCall) DoAndReturn(f func(*k8s.CreatePoolRequest, ...scw.RequestOption) (*k8s.Pool, error)) *MockK8sAPICreatePoolCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// DeleteCluster mocks base method. +func (m *MockK8sAPI) DeleteCluster(req *k8s.DeleteClusterRequest, opts ...scw.RequestOption) (*k8s.Cluster, error) { + m.ctrl.T.Helper() + varargs := []any{req} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "DeleteCluster", varargs...) + ret0, _ := ret[0].(*k8s.Cluster) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DeleteCluster indicates an expected call of DeleteCluster. +func (mr *MockK8sAPIMockRecorder) DeleteCluster(req any, opts ...any) *MockK8sAPIDeleteClusterCall { + mr.mock.ctrl.T.Helper() + varargs := append([]any{req}, opts...) + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteCluster", reflect.TypeOf((*MockK8sAPI)(nil).DeleteCluster), varargs...) + return &MockK8sAPIDeleteClusterCall{Call: call} +} + +// MockK8sAPIDeleteClusterCall wrap *gomock.Call +type MockK8sAPIDeleteClusterCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockK8sAPIDeleteClusterCall) Return(arg0 *k8s.Cluster, arg1 error) *MockK8sAPIDeleteClusterCall { + c.Call = c.Call.Return(arg0, arg1) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockK8sAPIDeleteClusterCall) Do(f func(*k8s.DeleteClusterRequest, ...scw.RequestOption) (*k8s.Cluster, error)) *MockK8sAPIDeleteClusterCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockK8sAPIDeleteClusterCall) DoAndReturn(f func(*k8s.DeleteClusterRequest, ...scw.RequestOption) (*k8s.Cluster, error)) *MockK8sAPIDeleteClusterCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// DeletePool mocks base method. +func (m *MockK8sAPI) DeletePool(req *k8s.DeletePoolRequest, opts ...scw.RequestOption) (*k8s.Pool, error) { + m.ctrl.T.Helper() + varargs := []any{req} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "DeletePool", varargs...) + ret0, _ := ret[0].(*k8s.Pool) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// DeletePool indicates an expected call of DeletePool. +func (mr *MockK8sAPIMockRecorder) DeletePool(req any, opts ...any) *MockK8sAPIDeletePoolCall { + mr.mock.ctrl.T.Helper() + varargs := append([]any{req}, opts...) + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeletePool", reflect.TypeOf((*MockK8sAPI)(nil).DeletePool), varargs...) 
+ return &MockK8sAPIDeletePoolCall{Call: call} +} + +// MockK8sAPIDeletePoolCall wrap *gomock.Call +type MockK8sAPIDeletePoolCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockK8sAPIDeletePoolCall) Return(arg0 *k8s.Pool, arg1 error) *MockK8sAPIDeletePoolCall { + c.Call = c.Call.Return(arg0, arg1) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockK8sAPIDeletePoolCall) Do(f func(*k8s.DeletePoolRequest, ...scw.RequestOption) (*k8s.Pool, error)) *MockK8sAPIDeletePoolCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockK8sAPIDeletePoolCall) DoAndReturn(f func(*k8s.DeletePoolRequest, ...scw.RequestOption) (*k8s.Pool, error)) *MockK8sAPIDeletePoolCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// GetClusterKubeConfig mocks base method. +func (m *MockK8sAPI) GetClusterKubeConfig(req *k8s.GetClusterKubeConfigRequest, opts ...scw.RequestOption) (*k8s.Kubeconfig, error) { + m.ctrl.T.Helper() + varargs := []any{req} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "GetClusterKubeConfig", varargs...) + ret0, _ := ret[0].(*k8s.Kubeconfig) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetClusterKubeConfig indicates an expected call of GetClusterKubeConfig. +func (mr *MockK8sAPIMockRecorder) GetClusterKubeConfig(req any, opts ...any) *MockK8sAPIGetClusterKubeConfigCall { + mr.mock.ctrl.T.Helper() + varargs := append([]any{req}, opts...) + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetClusterKubeConfig", reflect.TypeOf((*MockK8sAPI)(nil).GetClusterKubeConfig), varargs...) + return &MockK8sAPIGetClusterKubeConfigCall{Call: call} +} + +// MockK8sAPIGetClusterKubeConfigCall wrap *gomock.Call +type MockK8sAPIGetClusterKubeConfigCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockK8sAPIGetClusterKubeConfigCall) Return(arg0 *k8s.Kubeconfig, arg1 error) *MockK8sAPIGetClusterKubeConfigCall { + c.Call = c.Call.Return(arg0, arg1) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockK8sAPIGetClusterKubeConfigCall) Do(f func(*k8s.GetClusterKubeConfigRequest, ...scw.RequestOption) (*k8s.Kubeconfig, error)) *MockK8sAPIGetClusterKubeConfigCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockK8sAPIGetClusterKubeConfigCall) DoAndReturn(f func(*k8s.GetClusterKubeConfigRequest, ...scw.RequestOption) (*k8s.Kubeconfig, error)) *MockK8sAPIGetClusterKubeConfigCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// ListClusterACLRules mocks base method. +func (m *MockK8sAPI) ListClusterACLRules(req *k8s.ListClusterACLRulesRequest, opts ...scw.RequestOption) (*k8s.ListClusterACLRulesResponse, error) { + m.ctrl.T.Helper() + varargs := []any{req} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "ListClusterACLRules", varargs...) + ret0, _ := ret[0].(*k8s.ListClusterACLRulesResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListClusterACLRules indicates an expected call of ListClusterACLRules. +func (mr *MockK8sAPIMockRecorder) ListClusterACLRules(req any, opts ...any) *MockK8sAPIListClusterACLRulesCall { + mr.mock.ctrl.T.Helper() + varargs := append([]any{req}, opts...) + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListClusterACLRules", reflect.TypeOf((*MockK8sAPI)(nil).ListClusterACLRules), varargs...) 
+ return &MockK8sAPIListClusterACLRulesCall{Call: call} +} + +// MockK8sAPIListClusterACLRulesCall wrap *gomock.Call +type MockK8sAPIListClusterACLRulesCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockK8sAPIListClusterACLRulesCall) Return(arg0 *k8s.ListClusterACLRulesResponse, arg1 error) *MockK8sAPIListClusterACLRulesCall { + c.Call = c.Call.Return(arg0, arg1) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockK8sAPIListClusterACLRulesCall) Do(f func(*k8s.ListClusterACLRulesRequest, ...scw.RequestOption) (*k8s.ListClusterACLRulesResponse, error)) *MockK8sAPIListClusterACLRulesCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockK8sAPIListClusterACLRulesCall) DoAndReturn(f func(*k8s.ListClusterACLRulesRequest, ...scw.RequestOption) (*k8s.ListClusterACLRulesResponse, error)) *MockK8sAPIListClusterACLRulesCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// ListClusters mocks base method. +func (m *MockK8sAPI) ListClusters(req *k8s.ListClustersRequest, opts ...scw.RequestOption) (*k8s.ListClustersResponse, error) { + m.ctrl.T.Helper() + varargs := []any{req} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "ListClusters", varargs...) + ret0, _ := ret[0].(*k8s.ListClustersResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListClusters indicates an expected call of ListClusters. +func (mr *MockK8sAPIMockRecorder) ListClusters(req any, opts ...any) *MockK8sAPIListClustersCall { + mr.mock.ctrl.T.Helper() + varargs := append([]any{req}, opts...) + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListClusters", reflect.TypeOf((*MockK8sAPI)(nil).ListClusters), varargs...) + return &MockK8sAPIListClustersCall{Call: call} +} + +// MockK8sAPIListClustersCall wrap *gomock.Call +type MockK8sAPIListClustersCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockK8sAPIListClustersCall) Return(arg0 *k8s.ListClustersResponse, arg1 error) *MockK8sAPIListClustersCall { + c.Call = c.Call.Return(arg0, arg1) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockK8sAPIListClustersCall) Do(f func(*k8s.ListClustersRequest, ...scw.RequestOption) (*k8s.ListClustersResponse, error)) *MockK8sAPIListClustersCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockK8sAPIListClustersCall) DoAndReturn(f func(*k8s.ListClustersRequest, ...scw.RequestOption) (*k8s.ListClustersResponse, error)) *MockK8sAPIListClustersCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// ListNodes mocks base method. +func (m *MockK8sAPI) ListNodes(req *k8s.ListNodesRequest, opts ...scw.RequestOption) (*k8s.ListNodesResponse, error) { + m.ctrl.T.Helper() + varargs := []any{req} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "ListNodes", varargs...) + ret0, _ := ret[0].(*k8s.ListNodesResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListNodes indicates an expected call of ListNodes. +func (mr *MockK8sAPIMockRecorder) ListNodes(req any, opts ...any) *MockK8sAPIListNodesCall { + mr.mock.ctrl.T.Helper() + varargs := append([]any{req}, opts...) + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListNodes", reflect.TypeOf((*MockK8sAPI)(nil).ListNodes), varargs...) 
+ return &MockK8sAPIListNodesCall{Call: call} +} + +// MockK8sAPIListNodesCall wrap *gomock.Call +type MockK8sAPIListNodesCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockK8sAPIListNodesCall) Return(arg0 *k8s.ListNodesResponse, arg1 error) *MockK8sAPIListNodesCall { + c.Call = c.Call.Return(arg0, arg1) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockK8sAPIListNodesCall) Do(f func(*k8s.ListNodesRequest, ...scw.RequestOption) (*k8s.ListNodesResponse, error)) *MockK8sAPIListNodesCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockK8sAPIListNodesCall) DoAndReturn(f func(*k8s.ListNodesRequest, ...scw.RequestOption) (*k8s.ListNodesResponse, error)) *MockK8sAPIListNodesCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// ListPools mocks base method. +func (m *MockK8sAPI) ListPools(req *k8s.ListPoolsRequest, opts ...scw.RequestOption) (*k8s.ListPoolsResponse, error) { + m.ctrl.T.Helper() + varargs := []any{req} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "ListPools", varargs...) + ret0, _ := ret[0].(*k8s.ListPoolsResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListPools indicates an expected call of ListPools. +func (mr *MockK8sAPIMockRecorder) ListPools(req any, opts ...any) *MockK8sAPIListPoolsCall { + mr.mock.ctrl.T.Helper() + varargs := append([]any{req}, opts...) + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListPools", reflect.TypeOf((*MockK8sAPI)(nil).ListPools), varargs...) + return &MockK8sAPIListPoolsCall{Call: call} +} + +// MockK8sAPIListPoolsCall wrap *gomock.Call +type MockK8sAPIListPoolsCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockK8sAPIListPoolsCall) Return(arg0 *k8s.ListPoolsResponse, arg1 error) *MockK8sAPIListPoolsCall { + c.Call = c.Call.Return(arg0, arg1) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockK8sAPIListPoolsCall) Do(f func(*k8s.ListPoolsRequest, ...scw.RequestOption) (*k8s.ListPoolsResponse, error)) *MockK8sAPIListPoolsCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockK8sAPIListPoolsCall) DoAndReturn(f func(*k8s.ListPoolsRequest, ...scw.RequestOption) (*k8s.ListPoolsResponse, error)) *MockK8sAPIListPoolsCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// SetClusterACLRules mocks base method. +func (m *MockK8sAPI) SetClusterACLRules(req *k8s.SetClusterACLRulesRequest, opts ...scw.RequestOption) (*k8s.SetClusterACLRulesResponse, error) { + m.ctrl.T.Helper() + varargs := []any{req} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "SetClusterACLRules", varargs...) + ret0, _ := ret[0].(*k8s.SetClusterACLRulesResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// SetClusterACLRules indicates an expected call of SetClusterACLRules. +func (mr *MockK8sAPIMockRecorder) SetClusterACLRules(req any, opts ...any) *MockK8sAPISetClusterACLRulesCall { + mr.mock.ctrl.T.Helper() + varargs := append([]any{req}, opts...) + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetClusterACLRules", reflect.TypeOf((*MockK8sAPI)(nil).SetClusterACLRules), varargs...) 
+ return &MockK8sAPISetClusterACLRulesCall{Call: call} +} + +// MockK8sAPISetClusterACLRulesCall wrap *gomock.Call +type MockK8sAPISetClusterACLRulesCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockK8sAPISetClusterACLRulesCall) Return(arg0 *k8s.SetClusterACLRulesResponse, arg1 error) *MockK8sAPISetClusterACLRulesCall { + c.Call = c.Call.Return(arg0, arg1) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockK8sAPISetClusterACLRulesCall) Do(f func(*k8s.SetClusterACLRulesRequest, ...scw.RequestOption) (*k8s.SetClusterACLRulesResponse, error)) *MockK8sAPISetClusterACLRulesCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockK8sAPISetClusterACLRulesCall) DoAndReturn(f func(*k8s.SetClusterACLRulesRequest, ...scw.RequestOption) (*k8s.SetClusterACLRulesResponse, error)) *MockK8sAPISetClusterACLRulesCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// SetClusterType mocks base method. +func (m *MockK8sAPI) SetClusterType(req *k8s.SetClusterTypeRequest, opts ...scw.RequestOption) (*k8s.Cluster, error) { + m.ctrl.T.Helper() + varargs := []any{req} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "SetClusterType", varargs...) + ret0, _ := ret[0].(*k8s.Cluster) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// SetClusterType indicates an expected call of SetClusterType. +func (mr *MockK8sAPIMockRecorder) SetClusterType(req any, opts ...any) *MockK8sAPISetClusterTypeCall { + mr.mock.ctrl.T.Helper() + varargs := append([]any{req}, opts...) + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetClusterType", reflect.TypeOf((*MockK8sAPI)(nil).SetClusterType), varargs...) + return &MockK8sAPISetClusterTypeCall{Call: call} +} + +// MockK8sAPISetClusterTypeCall wrap *gomock.Call +type MockK8sAPISetClusterTypeCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockK8sAPISetClusterTypeCall) Return(arg0 *k8s.Cluster, arg1 error) *MockK8sAPISetClusterTypeCall { + c.Call = c.Call.Return(arg0, arg1) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockK8sAPISetClusterTypeCall) Do(f func(*k8s.SetClusterTypeRequest, ...scw.RequestOption) (*k8s.Cluster, error)) *MockK8sAPISetClusterTypeCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockK8sAPISetClusterTypeCall) DoAndReturn(f func(*k8s.SetClusterTypeRequest, ...scw.RequestOption) (*k8s.Cluster, error)) *MockK8sAPISetClusterTypeCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// UpdateCluster mocks base method. +func (m *MockK8sAPI) UpdateCluster(req *k8s.UpdateClusterRequest, opts ...scw.RequestOption) (*k8s.Cluster, error) { + m.ctrl.T.Helper() + varargs := []any{req} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "UpdateCluster", varargs...) + ret0, _ := ret[0].(*k8s.Cluster) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// UpdateCluster indicates an expected call of UpdateCluster. +func (mr *MockK8sAPIMockRecorder) UpdateCluster(req any, opts ...any) *MockK8sAPIUpdateClusterCall { + mr.mock.ctrl.T.Helper() + varargs := append([]any{req}, opts...) + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateCluster", reflect.TypeOf((*MockK8sAPI)(nil).UpdateCluster), varargs...) 
+ return &MockK8sAPIUpdateClusterCall{Call: call} +} + +// MockK8sAPIUpdateClusterCall wrap *gomock.Call +type MockK8sAPIUpdateClusterCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockK8sAPIUpdateClusterCall) Return(arg0 *k8s.Cluster, arg1 error) *MockK8sAPIUpdateClusterCall { + c.Call = c.Call.Return(arg0, arg1) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockK8sAPIUpdateClusterCall) Do(f func(*k8s.UpdateClusterRequest, ...scw.RequestOption) (*k8s.Cluster, error)) *MockK8sAPIUpdateClusterCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockK8sAPIUpdateClusterCall) DoAndReturn(f func(*k8s.UpdateClusterRequest, ...scw.RequestOption) (*k8s.Cluster, error)) *MockK8sAPIUpdateClusterCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// UpdatePool mocks base method. +func (m *MockK8sAPI) UpdatePool(req *k8s.UpdatePoolRequest, opts ...scw.RequestOption) (*k8s.Pool, error) { + m.ctrl.T.Helper() + varargs := []any{req} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "UpdatePool", varargs...) + ret0, _ := ret[0].(*k8s.Pool) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// UpdatePool indicates an expected call of UpdatePool. +func (mr *MockK8sAPIMockRecorder) UpdatePool(req any, opts ...any) *MockK8sAPIUpdatePoolCall { + mr.mock.ctrl.T.Helper() + varargs := append([]any{req}, opts...) + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdatePool", reflect.TypeOf((*MockK8sAPI)(nil).UpdatePool), varargs...) + return &MockK8sAPIUpdatePoolCall{Call: call} +} + +// MockK8sAPIUpdatePoolCall wrap *gomock.Call +type MockK8sAPIUpdatePoolCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockK8sAPIUpdatePoolCall) Return(arg0 *k8s.Pool, arg1 error) *MockK8sAPIUpdatePoolCall { + c.Call = c.Call.Return(arg0, arg1) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockK8sAPIUpdatePoolCall) Do(f func(*k8s.UpdatePoolRequest, ...scw.RequestOption) (*k8s.Pool, error)) *MockK8sAPIUpdatePoolCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockK8sAPIUpdatePoolCall) DoAndReturn(f func(*k8s.UpdatePoolRequest, ...scw.RequestOption) (*k8s.Pool, error)) *MockK8sAPIUpdatePoolCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// UpgradeCluster mocks base method. +func (m *MockK8sAPI) UpgradeCluster(req *k8s.UpgradeClusterRequest, opts ...scw.RequestOption) (*k8s.Cluster, error) { + m.ctrl.T.Helper() + varargs := []any{req} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "UpgradeCluster", varargs...) + ret0, _ := ret[0].(*k8s.Cluster) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// UpgradeCluster indicates an expected call of UpgradeCluster. +func (mr *MockK8sAPIMockRecorder) UpgradeCluster(req any, opts ...any) *MockK8sAPIUpgradeClusterCall { + mr.mock.ctrl.T.Helper() + varargs := append([]any{req}, opts...) + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpgradeCluster", reflect.TypeOf((*MockK8sAPI)(nil).UpgradeCluster), varargs...) 
+ return &MockK8sAPIUpgradeClusterCall{Call: call} +} + +// MockK8sAPIUpgradeClusterCall wrap *gomock.Call +type MockK8sAPIUpgradeClusterCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockK8sAPIUpgradeClusterCall) Return(arg0 *k8s.Cluster, arg1 error) *MockK8sAPIUpgradeClusterCall { + c.Call = c.Call.Return(arg0, arg1) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockK8sAPIUpgradeClusterCall) Do(f func(*k8s.UpgradeClusterRequest, ...scw.RequestOption) (*k8s.Cluster, error)) *MockK8sAPIUpgradeClusterCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockK8sAPIUpgradeClusterCall) DoAndReturn(f func(*k8s.UpgradeClusterRequest, ...scw.RequestOption) (*k8s.Cluster, error)) *MockK8sAPIUpgradeClusterCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// UpgradePool mocks base method. +func (m *MockK8sAPI) UpgradePool(req *k8s.UpgradePoolRequest, opts ...scw.RequestOption) (*k8s.Pool, error) { + m.ctrl.T.Helper() + varargs := []any{req} + for _, a := range opts { + varargs = append(varargs, a) + } + ret := m.ctrl.Call(m, "UpgradePool", varargs...) + ret0, _ := ret[0].(*k8s.Pool) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// UpgradePool indicates an expected call of UpgradePool. +func (mr *MockK8sAPIMockRecorder) UpgradePool(req any, opts ...any) *MockK8sAPIUpgradePoolCall { + mr.mock.ctrl.T.Helper() + varargs := append([]any{req}, opts...) + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpgradePool", reflect.TypeOf((*MockK8sAPI)(nil).UpgradePool), varargs...) + return &MockK8sAPIUpgradePoolCall{Call: call} +} + +// MockK8sAPIUpgradePoolCall wrap *gomock.Call +type MockK8sAPIUpgradePoolCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockK8sAPIUpgradePoolCall) Return(arg0 *k8s.Pool, arg1 error) *MockK8sAPIUpgradePoolCall { + c.Call = c.Call.Return(arg0, arg1) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockK8sAPIUpgradePoolCall) Do(f func(*k8s.UpgradePoolRequest, ...scw.RequestOption) (*k8s.Pool, error)) *MockK8sAPIUpgradePoolCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockK8sAPIUpgradePoolCall) DoAndReturn(f func(*k8s.UpgradePoolRequest, ...scw.RequestOption) (*k8s.Pool, error)) *MockK8sAPIUpgradePoolCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// MockK8s is a mock of K8s interface. +type MockK8s struct { + ctrl *gomock.Controller + recorder *MockK8sMockRecorder + isgomock struct{} +} + +// MockK8sMockRecorder is the mock recorder for MockK8s. +type MockK8sMockRecorder struct { + mock *MockK8s +} + +// NewMockK8s creates a new mock instance. +func NewMockK8s(ctrl *gomock.Controller) *MockK8s { + mock := &MockK8s{ctrl: ctrl} + mock.recorder = &MockK8sMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockK8s) EXPECT() *MockK8sMockRecorder { + return m.recorder +} + +// CreateCluster mocks base method. 
+func (m *MockK8s) CreateCluster(ctx context.Context, name, clusterType, version string, pnID *string, tags, featureGates, admissionPlugins, apiServerCertSANs []string, cni k8s.CNI, autoscalerConfig *k8s.CreateClusterRequestAutoscalerConfig, autoUpgrade *k8s.CreateClusterRequestAutoUpgrade, openIDConnectConfig *k8s.CreateClusterRequestOpenIDConnectConfig, podCIDR, serviceCIDR scw.IPNet) (*k8s.Cluster, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CreateCluster", ctx, name, clusterType, version, pnID, tags, featureGates, admissionPlugins, apiServerCertSANs, cni, autoscalerConfig, autoUpgrade, openIDConnectConfig, podCIDR, serviceCIDR) + ret0, _ := ret[0].(*k8s.Cluster) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CreateCluster indicates an expected call of CreateCluster. +func (mr *MockK8sMockRecorder) CreateCluster(ctx, name, clusterType, version, pnID, tags, featureGates, admissionPlugins, apiServerCertSANs, cni, autoscalerConfig, autoUpgrade, openIDConnectConfig, podCIDR, serviceCIDR any) *MockK8sCreateClusterCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateCluster", reflect.TypeOf((*MockK8s)(nil).CreateCluster), ctx, name, clusterType, version, pnID, tags, featureGates, admissionPlugins, apiServerCertSANs, cni, autoscalerConfig, autoUpgrade, openIDConnectConfig, podCIDR, serviceCIDR) + return &MockK8sCreateClusterCall{Call: call} +} + +// MockK8sCreateClusterCall wrap *gomock.Call +type MockK8sCreateClusterCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockK8sCreateClusterCall) Return(arg0 *k8s.Cluster, arg1 error) *MockK8sCreateClusterCall { + c.Call = c.Call.Return(arg0, arg1) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockK8sCreateClusterCall) Do(f func(context.Context, string, string, string, *string, []string, []string, []string, []string, k8s.CNI, *k8s.CreateClusterRequestAutoscalerConfig, *k8s.CreateClusterRequestAutoUpgrade, *k8s.CreateClusterRequestOpenIDConnectConfig, scw.IPNet, scw.IPNet) (*k8s.Cluster, error)) *MockK8sCreateClusterCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockK8sCreateClusterCall) DoAndReturn(f func(context.Context, string, string, string, *string, []string, []string, []string, []string, k8s.CNI, *k8s.CreateClusterRequestAutoscalerConfig, *k8s.CreateClusterRequestAutoUpgrade, *k8s.CreateClusterRequestOpenIDConnectConfig, scw.IPNet, scw.IPNet) (*k8s.Cluster, error)) *MockK8sCreateClusterCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// CreatePool mocks base method. +func (m *MockK8s) CreatePool(ctx context.Context, zone scw.Zone, clusterID, name, nodeType string, placementGroupID, securityGroupID *string, autoscaling, autohealing, publicIPDisabled bool, size uint32, minSize, maxSize *uint32, tags []string, kubeletArgs map[string]string, rootVolumeType k8s.PoolVolumeType, rootVolumeSizeGB *uint64, upgradePolicy *k8s.CreatePoolRequestUpgradePolicy) (*k8s.Pool, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CreatePool", ctx, zone, clusterID, name, nodeType, placementGroupID, securityGroupID, autoscaling, autohealing, publicIPDisabled, size, minSize, maxSize, tags, kubeletArgs, rootVolumeType, rootVolumeSizeGB, upgradePolicy) + ret0, _ := ret[0].(*k8s.Pool) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CreatePool indicates an expected call of CreatePool. 
+func (mr *MockK8sMockRecorder) CreatePool(ctx, zone, clusterID, name, nodeType, placementGroupID, securityGroupID, autoscaling, autohealing, publicIPDisabled, size, minSize, maxSize, tags, kubeletArgs, rootVolumeType, rootVolumeSizeGB, upgradePolicy any) *MockK8sCreatePoolCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreatePool", reflect.TypeOf((*MockK8s)(nil).CreatePool), ctx, zone, clusterID, name, nodeType, placementGroupID, securityGroupID, autoscaling, autohealing, publicIPDisabled, size, minSize, maxSize, tags, kubeletArgs, rootVolumeType, rootVolumeSizeGB, upgradePolicy) + return &MockK8sCreatePoolCall{Call: call} +} + +// MockK8sCreatePoolCall wrap *gomock.Call +type MockK8sCreatePoolCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockK8sCreatePoolCall) Return(arg0 *k8s.Pool, arg1 error) *MockK8sCreatePoolCall { + c.Call = c.Call.Return(arg0, arg1) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockK8sCreatePoolCall) Do(f func(context.Context, scw.Zone, string, string, string, *string, *string, bool, bool, bool, uint32, *uint32, *uint32, []string, map[string]string, k8s.PoolVolumeType, *uint64, *k8s.CreatePoolRequestUpgradePolicy) (*k8s.Pool, error)) *MockK8sCreatePoolCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockK8sCreatePoolCall) DoAndReturn(f func(context.Context, scw.Zone, string, string, string, *string, *string, bool, bool, bool, uint32, *uint32, *uint32, []string, map[string]string, k8s.PoolVolumeType, *uint64, *k8s.CreatePoolRequestUpgradePolicy) (*k8s.Pool, error)) *MockK8sCreatePoolCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// DeleteCluster mocks base method. +func (m *MockK8s) DeleteCluster(ctx context.Context, id string, withAdditionalResources bool) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteCluster", ctx, id, withAdditionalResources) + ret0, _ := ret[0].(error) + return ret0 +} + +// DeleteCluster indicates an expected call of DeleteCluster. +func (mr *MockK8sMockRecorder) DeleteCluster(ctx, id, withAdditionalResources any) *MockK8sDeleteClusterCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteCluster", reflect.TypeOf((*MockK8s)(nil).DeleteCluster), ctx, id, withAdditionalResources) + return &MockK8sDeleteClusterCall{Call: call} +} + +// MockK8sDeleteClusterCall wrap *gomock.Call +type MockK8sDeleteClusterCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockK8sDeleteClusterCall) Return(arg0 error) *MockK8sDeleteClusterCall { + c.Call = c.Call.Return(arg0) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockK8sDeleteClusterCall) Do(f func(context.Context, string, bool) error) *MockK8sDeleteClusterCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockK8sDeleteClusterCall) DoAndReturn(f func(context.Context, string, bool) error) *MockK8sDeleteClusterCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// DeletePool mocks base method. +func (m *MockK8s) DeletePool(ctx context.Context, id string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeletePool", ctx, id) + ret0, _ := ret[0].(error) + return ret0 +} + +// DeletePool indicates an expected call of DeletePool. 
+func (mr *MockK8sMockRecorder) DeletePool(ctx, id any) *MockK8sDeletePoolCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeletePool", reflect.TypeOf((*MockK8s)(nil).DeletePool), ctx, id) + return &MockK8sDeletePoolCall{Call: call} +} + +// MockK8sDeletePoolCall wrap *gomock.Call +type MockK8sDeletePoolCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockK8sDeletePoolCall) Return(arg0 error) *MockK8sDeletePoolCall { + c.Call = c.Call.Return(arg0) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockK8sDeletePoolCall) Do(f func(context.Context, string) error) *MockK8sDeletePoolCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockK8sDeletePoolCall) DoAndReturn(f func(context.Context, string) error) *MockK8sDeletePoolCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// FindCluster mocks base method. +func (m *MockK8s) FindCluster(ctx context.Context, name string) (*k8s.Cluster, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "FindCluster", ctx, name) + ret0, _ := ret[0].(*k8s.Cluster) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// FindCluster indicates an expected call of FindCluster. +func (mr *MockK8sMockRecorder) FindCluster(ctx, name any) *MockK8sFindClusterCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FindCluster", reflect.TypeOf((*MockK8s)(nil).FindCluster), ctx, name) + return &MockK8sFindClusterCall{Call: call} +} + +// MockK8sFindClusterCall wrap *gomock.Call +type MockK8sFindClusterCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockK8sFindClusterCall) Return(arg0 *k8s.Cluster, arg1 error) *MockK8sFindClusterCall { + c.Call = c.Call.Return(arg0, arg1) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockK8sFindClusterCall) Do(f func(context.Context, string) (*k8s.Cluster, error)) *MockK8sFindClusterCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockK8sFindClusterCall) DoAndReturn(f func(context.Context, string) (*k8s.Cluster, error)) *MockK8sFindClusterCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// FindPool mocks base method. +func (m *MockK8s) FindPool(ctx context.Context, clusterID, name string) (*k8s.Pool, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "FindPool", ctx, clusterID, name) + ret0, _ := ret[0].(*k8s.Pool) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// FindPool indicates an expected call of FindPool. 
+func (mr *MockK8sMockRecorder) FindPool(ctx, clusterID, name any) *MockK8sFindPoolCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FindPool", reflect.TypeOf((*MockK8s)(nil).FindPool), ctx, clusterID, name) + return &MockK8sFindPoolCall{Call: call} +} + +// MockK8sFindPoolCall wrap *gomock.Call +type MockK8sFindPoolCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockK8sFindPoolCall) Return(arg0 *k8s.Pool, arg1 error) *MockK8sFindPoolCall { + c.Call = c.Call.Return(arg0, arg1) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockK8sFindPoolCall) Do(f func(context.Context, string, string) (*k8s.Pool, error)) *MockK8sFindPoolCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockK8sFindPoolCall) DoAndReturn(f func(context.Context, string, string) (*k8s.Pool, error)) *MockK8sFindPoolCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// GetClusterKubeConfig mocks base method. +func (m *MockK8s) GetClusterKubeConfig(ctx context.Context, id string) (*k8s.Kubeconfig, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetClusterKubeConfig", ctx, id) + ret0, _ := ret[0].(*k8s.Kubeconfig) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetClusterKubeConfig indicates an expected call of GetClusterKubeConfig. +func (mr *MockK8sMockRecorder) GetClusterKubeConfig(ctx, id any) *MockK8sGetClusterKubeConfigCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetClusterKubeConfig", reflect.TypeOf((*MockK8s)(nil).GetClusterKubeConfig), ctx, id) + return &MockK8sGetClusterKubeConfigCall{Call: call} +} + +// MockK8sGetClusterKubeConfigCall wrap *gomock.Call +type MockK8sGetClusterKubeConfigCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockK8sGetClusterKubeConfigCall) Return(arg0 *k8s.Kubeconfig, arg1 error) *MockK8sGetClusterKubeConfigCall { + c.Call = c.Call.Return(arg0, arg1) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockK8sGetClusterKubeConfigCall) Do(f func(context.Context, string) (*k8s.Kubeconfig, error)) *MockK8sGetClusterKubeConfigCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockK8sGetClusterKubeConfigCall) DoAndReturn(f func(context.Context, string) (*k8s.Kubeconfig, error)) *MockK8sGetClusterKubeConfigCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// ListClusterACLRules mocks base method. +func (m *MockK8s) ListClusterACLRules(ctx context.Context, clusterID string) ([]*k8s.ACLRule, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListClusterACLRules", ctx, clusterID) + ret0, _ := ret[0].([]*k8s.ACLRule) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListClusterACLRules indicates an expected call of ListClusterACLRules. 
+func (mr *MockK8sMockRecorder) ListClusterACLRules(ctx, clusterID any) *MockK8sListClusterACLRulesCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListClusterACLRules", reflect.TypeOf((*MockK8s)(nil).ListClusterACLRules), ctx, clusterID) + return &MockK8sListClusterACLRulesCall{Call: call} +} + +// MockK8sListClusterACLRulesCall wrap *gomock.Call +type MockK8sListClusterACLRulesCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockK8sListClusterACLRulesCall) Return(arg0 []*k8s.ACLRule, arg1 error) *MockK8sListClusterACLRulesCall { + c.Call = c.Call.Return(arg0, arg1) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockK8sListClusterACLRulesCall) Do(f func(context.Context, string) ([]*k8s.ACLRule, error)) *MockK8sListClusterACLRulesCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockK8sListClusterACLRulesCall) DoAndReturn(f func(context.Context, string) ([]*k8s.ACLRule, error)) *MockK8sListClusterACLRulesCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// ListNodes mocks base method. +func (m *MockK8s) ListNodes(ctx context.Context, clusterID, poolID string) ([]*k8s.Node, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListNodes", ctx, clusterID, poolID) + ret0, _ := ret[0].([]*k8s.Node) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListNodes indicates an expected call of ListNodes. +func (mr *MockK8sMockRecorder) ListNodes(ctx, clusterID, poolID any) *MockK8sListNodesCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListNodes", reflect.TypeOf((*MockK8s)(nil).ListNodes), ctx, clusterID, poolID) + return &MockK8sListNodesCall{Call: call} +} + +// MockK8sListNodesCall wrap *gomock.Call +type MockK8sListNodesCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockK8sListNodesCall) Return(arg0 []*k8s.Node, arg1 error) *MockK8sListNodesCall { + c.Call = c.Call.Return(arg0, arg1) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockK8sListNodesCall) Do(f func(context.Context, string, string) ([]*k8s.Node, error)) *MockK8sListNodesCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockK8sListNodesCall) DoAndReturn(f func(context.Context, string, string) ([]*k8s.Node, error)) *MockK8sListNodesCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// SetClusterACLRules mocks base method. +func (m *MockK8s) SetClusterACLRules(ctx context.Context, clusterID string, rules []*k8s.ACLRuleRequest) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SetClusterACLRules", ctx, clusterID, rules) + ret0, _ := ret[0].(error) + return ret0 +} + +// SetClusterACLRules indicates an expected call of SetClusterACLRules. 
+func (mr *MockK8sMockRecorder) SetClusterACLRules(ctx, clusterID, rules any) *MockK8sSetClusterACLRulesCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetClusterACLRules", reflect.TypeOf((*MockK8s)(nil).SetClusterACLRules), ctx, clusterID, rules) + return &MockK8sSetClusterACLRulesCall{Call: call} +} + +// MockK8sSetClusterACLRulesCall wrap *gomock.Call +type MockK8sSetClusterACLRulesCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockK8sSetClusterACLRulesCall) Return(arg0 error) *MockK8sSetClusterACLRulesCall { + c.Call = c.Call.Return(arg0) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockK8sSetClusterACLRulesCall) Do(f func(context.Context, string, []*k8s.ACLRuleRequest) error) *MockK8sSetClusterACLRulesCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockK8sSetClusterACLRulesCall) DoAndReturn(f func(context.Context, string, []*k8s.ACLRuleRequest) error) *MockK8sSetClusterACLRulesCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// SetClusterType mocks base method. +func (m *MockK8s) SetClusterType(ctx context.Context, id, clusterType string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SetClusterType", ctx, id, clusterType) + ret0, _ := ret[0].(error) + return ret0 +} + +// SetClusterType indicates an expected call of SetClusterType. +func (mr *MockK8sMockRecorder) SetClusterType(ctx, id, clusterType any) *MockK8sSetClusterTypeCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetClusterType", reflect.TypeOf((*MockK8s)(nil).SetClusterType), ctx, id, clusterType) + return &MockK8sSetClusterTypeCall{Call: call} +} + +// MockK8sSetClusterTypeCall wrap *gomock.Call +type MockK8sSetClusterTypeCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockK8sSetClusterTypeCall) Return(arg0 error) *MockK8sSetClusterTypeCall { + c.Call = c.Call.Return(arg0) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockK8sSetClusterTypeCall) Do(f func(context.Context, string, string) error) *MockK8sSetClusterTypeCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockK8sSetClusterTypeCall) DoAndReturn(f func(context.Context, string, string) error) *MockK8sSetClusterTypeCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// UpdateCluster mocks base method. +func (m *MockK8s) UpdateCluster(ctx context.Context, id string, tags, featureGates, admissionPlugins, apiServerCertSANs *[]string, autoscalerConfig *k8s.UpdateClusterRequestAutoscalerConfig, autoUpgrade *k8s.UpdateClusterRequestAutoUpgrade, openIDConnectConfig *k8s.UpdateClusterRequestOpenIDConnectConfig) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UpdateCluster", ctx, id, tags, featureGates, admissionPlugins, apiServerCertSANs, autoscalerConfig, autoUpgrade, openIDConnectConfig) + ret0, _ := ret[0].(error) + return ret0 +} + +// UpdateCluster indicates an expected call of UpdateCluster. 
+func (mr *MockK8sMockRecorder) UpdateCluster(ctx, id, tags, featureGates, admissionPlugins, apiServerCertSANs, autoscalerConfig, autoUpgrade, openIDConnectConfig any) *MockK8sUpdateClusterCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateCluster", reflect.TypeOf((*MockK8s)(nil).UpdateCluster), ctx, id, tags, featureGates, admissionPlugins, apiServerCertSANs, autoscalerConfig, autoUpgrade, openIDConnectConfig) + return &MockK8sUpdateClusterCall{Call: call} +} + +// MockK8sUpdateClusterCall wrap *gomock.Call +type MockK8sUpdateClusterCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockK8sUpdateClusterCall) Return(arg0 error) *MockK8sUpdateClusterCall { + c.Call = c.Call.Return(arg0) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockK8sUpdateClusterCall) Do(f func(context.Context, string, *[]string, *[]string, *[]string, *[]string, *k8s.UpdateClusterRequestAutoscalerConfig, *k8s.UpdateClusterRequestAutoUpgrade, *k8s.UpdateClusterRequestOpenIDConnectConfig) error) *MockK8sUpdateClusterCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockK8sUpdateClusterCall) DoAndReturn(f func(context.Context, string, *[]string, *[]string, *[]string, *[]string, *k8s.UpdateClusterRequestAutoscalerConfig, *k8s.UpdateClusterRequestAutoUpgrade, *k8s.UpdateClusterRequestOpenIDConnectConfig) error) *MockK8sUpdateClusterCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// UpdatePool mocks base method. +func (m *MockK8s) UpdatePool(ctx context.Context, id string, autoscaling, autohealing *bool, size, minSize, maxSize *uint32, tags *[]string, kubeletArgs *map[string]string, upgradePolicy *k8s.UpdatePoolRequestUpgradePolicy) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UpdatePool", ctx, id, autoscaling, autohealing, size, minSize, maxSize, tags, kubeletArgs, upgradePolicy) + ret0, _ := ret[0].(error) + return ret0 +} + +// UpdatePool indicates an expected call of UpdatePool. +func (mr *MockK8sMockRecorder) UpdatePool(ctx, id, autoscaling, autohealing, size, minSize, maxSize, tags, kubeletArgs, upgradePolicy any) *MockK8sUpdatePoolCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdatePool", reflect.TypeOf((*MockK8s)(nil).UpdatePool), ctx, id, autoscaling, autohealing, size, minSize, maxSize, tags, kubeletArgs, upgradePolicy) + return &MockK8sUpdatePoolCall{Call: call} +} + +// MockK8sUpdatePoolCall wrap *gomock.Call +type MockK8sUpdatePoolCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockK8sUpdatePoolCall) Return(arg0 error) *MockK8sUpdatePoolCall { + c.Call = c.Call.Return(arg0) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockK8sUpdatePoolCall) Do(f func(context.Context, string, *bool, *bool, *uint32, *uint32, *uint32, *[]string, *map[string]string, *k8s.UpdatePoolRequestUpgradePolicy) error) *MockK8sUpdatePoolCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockK8sUpdatePoolCall) DoAndReturn(f func(context.Context, string, *bool, *bool, *uint32, *uint32, *uint32, *[]string, *map[string]string, *k8s.UpdatePoolRequestUpgradePolicy) error) *MockK8sUpdatePoolCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// UpgradeCluster mocks base method. 
+func (m *MockK8s) UpgradeCluster(ctx context.Context, id, version string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UpgradeCluster", ctx, id, version) + ret0, _ := ret[0].(error) + return ret0 +} + +// UpgradeCluster indicates an expected call of UpgradeCluster. +func (mr *MockK8sMockRecorder) UpgradeCluster(ctx, id, version any) *MockK8sUpgradeClusterCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpgradeCluster", reflect.TypeOf((*MockK8s)(nil).UpgradeCluster), ctx, id, version) + return &MockK8sUpgradeClusterCall{Call: call} +} + +// MockK8sUpgradeClusterCall wrap *gomock.Call +type MockK8sUpgradeClusterCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockK8sUpgradeClusterCall) Return(arg0 error) *MockK8sUpgradeClusterCall { + c.Call = c.Call.Return(arg0) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockK8sUpgradeClusterCall) Do(f func(context.Context, string, string) error) *MockK8sUpgradeClusterCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockK8sUpgradeClusterCall) DoAndReturn(f func(context.Context, string, string) error) *MockK8sUpgradeClusterCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// UpgradePool mocks base method. +func (m *MockK8s) UpgradePool(ctx context.Context, id, version string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UpgradePool", ctx, id, version) + ret0, _ := ret[0].(error) + return ret0 +} + +// UpgradePool indicates an expected call of UpgradePool. +func (mr *MockK8sMockRecorder) UpgradePool(ctx, id, version any) *MockK8sUpgradePoolCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpgradePool", reflect.TypeOf((*MockK8s)(nil).UpgradePool), ctx, id, version) + return &MockK8sUpgradePoolCall{Call: call} +} + +// MockK8sUpgradePoolCall wrap *gomock.Call +type MockK8sUpgradePoolCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockK8sUpgradePoolCall) Return(arg0 error) *MockK8sUpgradePoolCall { + c.Call = c.Call.Return(arg0) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockK8sUpgradePoolCall) Do(f func(context.Context, string, string) error) *MockK8sUpgradePoolCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockK8sUpgradePoolCall) DoAndReturn(f func(context.Context, string, string) error) *MockK8sUpgradePoolCall { + c.Call = c.Call.DoAndReturn(f) + return c +} diff --git a/internal/service/scaleway/common/semver.go b/internal/service/scaleway/common/semver.go new file mode 100644 index 0000000..ac4388c --- /dev/null +++ b/internal/service/scaleway/common/semver.go @@ -0,0 +1,21 @@ +package common + +import ( + "fmt" + + "github.com/Masterminds/semver/v3" +) + +// IsUpToDate compares current and desired semver and returns true if current >= desired. 
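+// For example, IsUpToDate("1.31.0", "1.30.0") and IsUpToDate("1.30.0", "1.30.0")
+// both return true, while a value with a leading "v" (such as "v1.30.0") is
+// rejected by semver.StrictNewVersion and results in an error.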
+func IsUpToDate(current, desired string) (bool, error) { + curr, err := semver.StrictNewVersion(current) + if err != nil { + return false, fmt.Errorf("failed to parse current version: %w", err) + } + desi, err := semver.StrictNewVersion(desired) + if err != nil { + return false, fmt.Errorf("failed to parse desired version: %w", err) + } + + return curr.GreaterThanEqual(desi), nil +} diff --git a/internal/service/scaleway/common/semver_test.go b/internal/service/scaleway/common/semver_test.go new file mode 100644 index 0000000..c3f00e2 --- /dev/null +++ b/internal/service/scaleway/common/semver_test.go @@ -0,0 +1,61 @@ +package common + +import "testing" + +func TestIsUpToDate(t *testing.T) { + type args struct { + current string + desired string + } + tests := []struct { + name string + args args + want bool + wantErr bool + }{ + { + name: "not semver compliant (leading v)", + args: args{ + current: "v1.1.1", + desired: "v1.1.1", + }, + wantErr: true, + }, + { + name: "same version is up to date", + args: args{ + current: "1.30.0", + desired: "1.30.0", + }, + want: true, + }, + { + name: "current > desired is up to date", + args: args{ + current: "1.31.0", + desired: "1.30.0", + }, + want: true, + }, + { + name: "current < desired is not up to date", + args: args{ + current: "1.30.0", + desired: "1.31.0", + }, + want: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := IsUpToDate(tt.args.current, tt.args.desired) + if (err != nil) != tt.wantErr { + t.Errorf("IsUpToDate() error = %v, wantErr %v", err, tt.wantErr) + return + } + if got != tt.want { + t.Errorf("IsUpToDate() = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/internal/service/scaleway/common/slices.go b/internal/service/scaleway/common/slices.go new file mode 100644 index 0000000..b8a86bb --- /dev/null +++ b/internal/service/scaleway/common/slices.go @@ -0,0 +1,15 @@ +package common + +import ( + "cmp" + "slices" +) + +// SlicesEqualIgnoreOrder returns true if both slices are equal, regardless of order. 
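+// For example, SlicesEqualIgnoreOrder([]string{"a", "b"}, []string{"b", "a"}) is true,
+// whereas slices of different lengths or with differing elements are not equal.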
+func SlicesEqualIgnoreOrder[T cmp.Ordered](a, b []T) bool { + if len(a) != len(b) { + return false + } + + return slices.Equal(slices.Sorted(slices.Values(a)), slices.Sorted(slices.Values(b))) +} diff --git a/internal/service/scaleway/common/slices_test.go b/internal/service/scaleway/common/slices_test.go new file mode 100644 index 0000000..3c624e0 --- /dev/null +++ b/internal/service/scaleway/common/slices_test.go @@ -0,0 +1,55 @@ +package common + +import "testing" + +func TestSlicesEqualIgnoreOrder(t *testing.T) { + type args struct { + a []string + b []string + } + tests := []struct { + name string + args args + want bool + }{ + { + name: "not equal (size mismatch)", + args: args{ + a: []string{"a"}, + b: []string{"a", "a"}, + }, + want: false, + }, + { + name: "not equal", + args: args{ + a: []string{"a", "b"}, + b: []string{"a", "c"}, + }, + want: false, + }, + { + name: "equal with repeated elements", + args: args{ + a: []string{"a", "a", "a"}, + b: []string{"a", "a", "a"}, + }, + want: true, + }, + { + name: "equal with no repeated elements", + args: args{ + a: []string{"a", "b", "c"}, + b: []string{"a", "b", "c"}, + }, + want: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := SlicesEqualIgnoreOrder(tt.args.a, tt.args.b); got != tt.want { + t.Errorf("SlicesEqualIgnoreOrder() = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/internal/service/scaleway/k8s/cluster/cluster.go b/internal/service/scaleway/k8s/cluster/cluster.go new file mode 100644 index 0000000..722c518 --- /dev/null +++ b/internal/service/scaleway/k8s/cluster/cluster.go @@ -0,0 +1,425 @@ +package cluster + +import ( + "context" + "fmt" + "net" + "net/url" + "strconv" + "sync" + "time" + + "github.com/scaleway/cluster-api-provider-scaleway/internal/scope" + "github.com/scaleway/cluster-api-provider-scaleway/internal/service/scaleway" + "github.com/scaleway/cluster-api-provider-scaleway/internal/service/scaleway/client" + "github.com/scaleway/cluster-api-provider-scaleway/internal/service/scaleway/common" + "github.com/scaleway/scaleway-sdk-go/api/k8s/v1" + "github.com/scaleway/scaleway-sdk-go/scw" + utilerrors "k8s.io/apimachinery/pkg/util/errors" +) + +const clusterRetryTime = 30 * time.Second + +type kubeconfigGetter func() (*k8s.Kubeconfig, error) + +type Service struct { + *scope.ManagedControlPlane +} + +func New(s *scope.ManagedControlPlane) *Service { + return &Service{ManagedControlPlane: s} +} + +func (s *Service) Name() string { + return "k8s_cluster" +} + +func (s *Service) Reconcile(ctx context.Context) error { + cluster, err := s.getOrCreateCluster(ctx) + if err != nil { + return err + } + + if cluster.Status != k8s.ClusterStatusReady { + return scaleway.WithTransientError(fmt.Errorf("cluster %s is not yet ready: currently %s", cluster.ID, cluster.Status), clusterRetryTime) + } + + // Reconcile cluster type. + if desiredType := s.DesiredType(); desiredType != cluster.Type { + if err := s.ScalewayClient.SetClusterType(ctx, cluster.ID, desiredType); err != nil { + return err + } + + return scaleway.WithTransientError(fmt.Errorf("cluster %s is changing type to %s", cluster.ID, desiredType), clusterRetryTime) + } + + // Reconcile cluster version. 
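+	// An upgrade is only triggered when the current cluster version is strictly
+	// older than the desired one: IsUpToDate reports whether current >= desired,
+	// so clusters already at or above the desired version are left untouched.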
+ desiredVersion := s.DesiredVersion() + clusterUpToDate, err := common.IsUpToDate(cluster.Version, desiredVersion) + if err != nil { + return err + } + if !clusterUpToDate { + if err := s.ScalewayClient.UpgradeCluster(ctx, cluster.ID, desiredVersion); err != nil { + return err + } + + return scaleway.WithTransientError(fmt.Errorf("cluster %s is upgrading to %s", cluster.ID, desiredVersion), clusterRetryTime) + } + + // Reconcile cluster changes (tags, autoscaler, etc.). + updated, err := s.updateCluster(ctx, cluster) + if err != nil { + return err + } + if updated { + return scaleway.WithTransientError(fmt.Errorf("cluster %s is being updated", cluster.ID), clusterRetryTime) + } + + // Reconcile cluster ACL. + updated, err = s.updateClusterACLs(ctx, cluster) + if err != nil { + return err + } + if updated { + return scaleway.WithTransientError(fmt.Errorf("cluster %s is updating ACLs", cluster.ID), clusterRetryTime) + } + + // Reconcile kubeconfig. + getKubeconfigOnce := sync.OnceValues(func() (*k8s.Kubeconfig, error) { + return s.ScalewayClient.GetClusterKubeConfig(ctx, cluster.ID) + }) + if err := s.reconcileKubeconfig(ctx, cluster, getKubeconfigOnce); err != nil { + return err + } + if err := s.reconcileAdditionalKubeconfigs(ctx, cluster, getKubeconfigOnce); err != nil { + return err + } + + host, port, err := urlToHostPort(s.ClusterEndpoint(cluster)) + if err != nil { + return err + } + + s.SetControlPlaneEndpoint(host, port) + s.SetStatusVersion(cluster.Version) + + return nil +} + +func (s *Service) Delete(ctx context.Context) error { + clusterName := s.ManagedControlPlane.ManagedControlPlane.Spec.ClusterName + if clusterName == nil { + return nil + } + + cluster, err := s.ScalewayClient.FindCluster(ctx, *clusterName) + if err != nil { + if client.IsNotFoundError(err) { + return nil + } + + return err + } + + if err := s.ScalewayClient.DeleteCluster(ctx, cluster.ID, s.DeleteWithAdditionalResources()); err != nil { + return err + } + + return nil +} + +func (s *Service) getOrCreateCluster(ctx context.Context) (*k8s.Cluster, error) { + cluster, err := s.ScalewayClient.FindCluster(ctx, s.ClusterName()) + if err := utilerrors.FilterOut(err, client.IsNotFoundError); err != nil { + return nil, err + } + + if cluster == nil { + smcp := s.ManagedControlPlane.ManagedControlPlane + autoscalerConfig, err := s.DesiredClusterAutoscalerConfig() + if err != nil { + return nil, err + } + + autoUpgrade := s.DesiredAutoUpgrade() + oidcConfig := s.DesiredClusterOpenIDConnectConfig() + + var podCIDR, serviceCIDR scw.IPNet + if clusterNetwork := s.Cluster.Spec.ClusterNetwork; clusterNetwork != nil { + if clusterNetwork.Pods != nil && len(clusterNetwork.Pods.CIDRBlocks) > 0 { + _, podCIDRIPNet, err := net.ParseCIDR(clusterNetwork.Pods.CIDRBlocks[0]) + if err != nil { + return nil, err + } + + podCIDR.IPNet = *podCIDRIPNet + } + + if clusterNetwork.Services != nil && len(clusterNetwork.Services.CIDRBlocks) > 0 { + _, podCIDRIPNet, err := net.ParseCIDR(clusterNetwork.Services.CIDRBlocks[0]) + if err != nil { + return nil, err + } + + serviceCIDR.IPNet = *podCIDRIPNet + } + } + + cluster, err = s.ScalewayClient.CreateCluster( + ctx, + s.ClusterName(), + smcp.Spec.Type, + s.DesiredVersion(), + s.PrivateNetworkID(), + s.DesiredTags(), + smcp.Spec.FeatureGates, + smcp.Spec.AdmissionPlugins, + smcp.Spec.APIServerCertSANs, + s.DesiredCNI(), + &k8s.CreateClusterRequestAutoscalerConfig{ + ScaleDownDisabled: &autoscalerConfig.ScaleDownDisabled, + ScaleDownDelayAfterAdd: &autoscalerConfig.ScaleDownDelayAfterAdd, + 
Estimator: autoscalerConfig.Estimator, + Expander: autoscalerConfig.Expander, + IgnoreDaemonsetsUtilization: &autoscalerConfig.IgnoreDaemonsetsUtilization, + BalanceSimilarNodeGroups: &autoscalerConfig.BalanceSimilarNodeGroups, + ExpendablePodsPriorityCutoff: &autoscalerConfig.ExpendablePodsPriorityCutoff, + ScaleDownUnneededTime: &autoscalerConfig.ScaleDownUnneededTime, + ScaleDownUtilizationThreshold: &autoscalerConfig.ScaleDownUtilizationThreshold, + MaxGracefulTerminationSec: &autoscalerConfig.MaxGracefulTerminationSec, + }, + &k8s.CreateClusterRequestAutoUpgrade{ + Enable: autoUpgrade.Enabled, + MaintenanceWindow: &k8s.MaintenanceWindow{ + StartHour: autoUpgrade.MaintenanceWindow.StartHour, + Day: autoUpgrade.MaintenanceWindow.Day, + }, + }, + &k8s.CreateClusterRequestOpenIDConnectConfig{ + IssuerURL: oidcConfig.IssuerURL, + ClientID: oidcConfig.ClientID, + UsernameClaim: &oidcConfig.UsernameClaim, + UsernamePrefix: &oidcConfig.UsernamePrefix, + GroupsClaim: &oidcConfig.GroupsClaim, + GroupsPrefix: &oidcConfig.GroupsPrefix, + RequiredClaim: &oidcConfig.RequiredClaim, + }, + podCIDR, + serviceCIDR, + ) + if err != nil { + return nil, err + } + } + + return cluster, nil +} + +func (s *Service) updateCluster(ctx context.Context, cluster *k8s.Cluster) (bool, error) { + updateNeeded := false + smmp := s.ManagedControlPlane.ManagedControlPlane + + var tags *[]string + if !common.SlicesEqualIgnoreOrder(client.TagsWithoutCreatedBy(cluster.Tags), s.DesiredTags()) { + updateNeeded = true + tags = scw.StringsPtr(s.DesiredTags()) + } + + var featureGates *[]string + if !common.SlicesEqualIgnoreOrder(cluster.FeatureGates, smmp.Spec.FeatureGates) { + updateNeeded = true + featureGates = scw.StringsPtr(makeSliceIfNeeded(smmp.Spec.FeatureGates)) + } + + var admissionPlugins *[]string + if !common.SlicesEqualIgnoreOrder(cluster.AdmissionPlugins, smmp.Spec.AdmissionPlugins) { + updateNeeded = true + admissionPlugins = scw.StringsPtr(makeSliceIfNeeded(smmp.Spec.AdmissionPlugins)) + } + + var apiServerCertSANs *[]string + if !common.SlicesEqualIgnoreOrder(cluster.ApiserverCertSans, smmp.Spec.APIServerCertSANs) { + updateNeeded = true + apiServerCertSANs = scw.StringsPtr(makeSliceIfNeeded(smmp.Spec.APIServerCertSANs)) + } + + var autoscalerConfig *k8s.UpdateClusterRequestAutoscalerConfig + desiredAutoscalerConfig, err := s.DesiredClusterAutoscalerConfig() + if err != nil { + return false, err + } + if !autoscalerConfigMatchesDesired(cluster.AutoscalerConfig, desiredAutoscalerConfig) { + updateNeeded = true + autoscalerConfig = &k8s.UpdateClusterRequestAutoscalerConfig{ + ScaleDownDisabled: &desiredAutoscalerConfig.ScaleDownDisabled, + ScaleDownDelayAfterAdd: &desiredAutoscalerConfig.ScaleDownDelayAfterAdd, + Estimator: desiredAutoscalerConfig.Estimator, + Expander: desiredAutoscalerConfig.Expander, + IgnoreDaemonsetsUtilization: &desiredAutoscalerConfig.IgnoreDaemonsetsUtilization, + BalanceSimilarNodeGroups: &desiredAutoscalerConfig.BalanceSimilarNodeGroups, + ExpendablePodsPriorityCutoff: &desiredAutoscalerConfig.ExpendablePodsPriorityCutoff, + ScaleDownUnneededTime: &desiredAutoscalerConfig.ScaleDownUnneededTime, + ScaleDownUtilizationThreshold: &desiredAutoscalerConfig.ScaleDownUtilizationThreshold, + MaxGracefulTerminationSec: &desiredAutoscalerConfig.MaxGracefulTerminationSec, + } + } + + var autoUpgrade *k8s.UpdateClusterRequestAutoUpgrade + desiredAutoUpgrade := s.DesiredAutoUpgrade() + if !clusterAutoUpgradeMatchesDesired(cluster.AutoUpgrade, desiredAutoUpgrade) { + updateNeeded = true + 
autoUpgrade = &k8s.UpdateClusterRequestAutoUpgrade{ + Enable: &desiredAutoUpgrade.Enabled, + MaintenanceWindow: desiredAutoUpgrade.MaintenanceWindow, + } + } + + var oidcConfig *k8s.UpdateClusterRequestOpenIDConnectConfig + desiredOIDCConfig := s.DesiredClusterOpenIDConnectConfig() + if !clusterOpenIDConnectConfigMatchesDesired(cluster.OpenIDConnectConfig, desiredOIDCConfig) { + updateNeeded = true + oidcConfig = &k8s.UpdateClusterRequestOpenIDConnectConfig{ + IssuerURL: &desiredOIDCConfig.IssuerURL, + ClientID: &desiredOIDCConfig.ClientID, + UsernameClaim: &desiredOIDCConfig.UsernameClaim, + UsernamePrefix: &desiredOIDCConfig.UsernamePrefix, + GroupsClaim: &desiredOIDCConfig.GroupsClaim, + GroupsPrefix: &desiredOIDCConfig.GroupsPrefix, + RequiredClaim: &desiredOIDCConfig.RequiredClaim, + } + } + + if !updateNeeded { + return false, nil + } + + if err := s.ScalewayClient.UpdateCluster( + ctx, + cluster.ID, + tags, featureGates, admissionPlugins, apiServerCertSANs, + autoscalerConfig, + autoUpgrade, + oidcConfig, + ); err != nil { + return false, fmt.Errorf("failed to update cluster: %w", err) + } + + return true, nil +} + +func (s *Service) updateClusterACLs(ctx context.Context, cluster *k8s.Cluster) (bool, error) { + acls, err := s.ScalewayClient.ListClusterACLRules(ctx, cluster.ID) + if err != nil { + return false, err + } + + desired := s.DesiredAllowedRanges() + currentRanges, currentScalewayRanges := currentAllowedRanges(acls) + + if common.SlicesEqualIgnoreOrder(desired, currentRanges) { + return false, nil + } + + request := make([]*k8s.ACLRuleRequest, 0, len(currentRanges)+1) + + for _, cidr := range desired { + _, ipNet, err := net.ParseCIDR(cidr) + if err != nil { + return false, fmt.Errorf("failed to parse range: %w", err) + } + + request = append(request, &k8s.ACLRuleRequest{ + IP: &scw.IPNet{IPNet: *ipNet}, + }) + } + + if currentScalewayRanges { + request = append(request, &k8s.ACLRuleRequest{ + ScalewayRanges: scw.BoolPtr(true), + }) + } + + if err := s.ScalewayClient.SetClusterACLRules(ctx, cluster.ID, request); err != nil { + return false, fmt.Errorf("failed to set ACLs: %w", err) + } + + return true, nil +} + +func currentAllowedRanges(rules []*k8s.ACLRule) (ranges []string, scalewayRanges bool) { + ranges = make([]string, 0, len(rules)) + + for _, rule := range rules { + if rule.ScalewayRanges != nil { + scalewayRanges = *rule.ScalewayRanges + } else if rule.IP != nil { + ranges = append(ranges, rule.IP.String()) + } + } + + return +} + +func urlToHostPort(s string) (string, int32, error) { + u, err := url.Parse(s) + if err != nil { + return "", 0, err + } + + port, err := strconv.Atoi(u.Port()) + if err != nil { + return "", 0, err + } + + return u.Hostname(), int32(port), nil +} + +func makeSliceIfNeeded[T any](s []T) []T { + if s == nil { + return make([]T, 0) + } + + return s +} + +func autoscalerConfigMatchesDesired(current, desired *k8s.ClusterAutoscalerConfig) bool { + if current == nil || desired == nil { + return true + } + + return current.ScaleDownDisabled == desired.ScaleDownDisabled && + current.ScaleDownDelayAfterAdd == desired.ScaleDownDelayAfterAdd && + current.Estimator == desired.Estimator && + current.Expander == desired.Expander && + current.IgnoreDaemonsetsUtilization == desired.IgnoreDaemonsetsUtilization && + current.BalanceSimilarNodeGroups == desired.BalanceSimilarNodeGroups && + current.ExpendablePodsPriorityCutoff == desired.ExpendablePodsPriorityCutoff && + current.ScaleDownUnneededTime == desired.ScaleDownUnneededTime && + 
current.ScaleDownUtilizationThreshold == desired.ScaleDownUtilizationThreshold && + current.MaxGracefulTerminationSec == desired.MaxGracefulTerminationSec +} + +func clusterAutoUpgradeMatchesDesired(current, desired *k8s.ClusterAutoUpgrade) bool { + if current == nil || desired == nil || current.MaintenanceWindow == nil || desired.MaintenanceWindow == nil { + return true + } + + return current.Enabled == desired.Enabled && + current.MaintenanceWindow.Day == desired.MaintenanceWindow.Day && + current.MaintenanceWindow.StartHour == desired.MaintenanceWindow.StartHour +} + +func clusterOpenIDConnectConfigMatchesDesired(current, desired *k8s.ClusterOpenIDConnectConfig) bool { + if current == nil || desired == nil { + return true + } + + return current.IssuerURL == desired.IssuerURL && + current.ClientID == desired.ClientID && + current.UsernameClaim == desired.UsernameClaim && + current.UsernamePrefix == desired.UsernamePrefix && + common.SlicesEqualIgnoreOrder(current.GroupsClaim, desired.GroupsClaim) && + current.GroupsPrefix == desired.GroupsPrefix && + common.SlicesEqualIgnoreOrder(current.RequiredClaim, desired.RequiredClaim) +} diff --git a/internal/service/scaleway/k8s/cluster/cluster_test.go b/internal/service/scaleway/k8s/cluster/cluster_test.go new file mode 100644 index 0000000..6a73925 --- /dev/null +++ b/internal/service/scaleway/k8s/cluster/cluster_test.go @@ -0,0 +1,439 @@ +package cluster + +import ( + "context" + "fmt" + "net" + "testing" + + . "github.com/onsi/gomega" + "github.com/scaleway/cluster-api-provider-scaleway/api/v1alpha1" + "github.com/scaleway/cluster-api-provider-scaleway/internal/scope" + "github.com/scaleway/cluster-api-provider-scaleway/internal/service/scaleway/client" + "github.com/scaleway/cluster-api-provider-scaleway/internal/service/scaleway/client/mock_client" + "github.com/scaleway/scaleway-sdk-go/api/k8s/v1" + "github.com/scaleway/scaleway-sdk-go/scw" + "go.uber.org/mock/gomock" + corev1 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/cluster-api/api/v1beta1" + "sigs.k8s.io/controller-runtime/pkg/client/fake" +) + +const ( + clusterID = "11111111-1111-1111-1111-111111111111" + projectID = "11111111-1111-1111-1111-111111111111" + privateNetworkID = "11111111-1111-1111-1111-111111111111" +) + +func TestService_Reconcile(t *testing.T) { + t.Parallel() + + range0 := "0.0.0.0/0" + _, ipNet0, err := net.ParseCIDR(range0) + if err != nil { + t.Fatal(err) + } + + type fields struct { + ManagedControlPlane *scope.ManagedControlPlane + } + type args struct { + ctx context.Context + } + tests := []struct { + name string + fields fields + args args + wantErr bool + objects []runtime.Object + expect func(i *mock_client.MockInterfaceMockRecorder) + asserts func(g *WithT, s *scope.ManagedControlPlane) + }{ + { + name: "create control-plane", + fields: fields{ + ManagedControlPlane: &scope.ManagedControlPlane{ + Cluster: &v1beta1.Cluster{ + ObjectMeta: v1.ObjectMeta{ + Name: "cluster", + Namespace: "default", + }, + Spec: v1beta1.ClusterSpec{}, + }, + ManagedCluster: &v1alpha1.ScalewayManagedCluster{ + ObjectMeta: v1.ObjectMeta{ + Name: "managedcluster", + Namespace: "default", + }, + Spec: v1alpha1.ScalewayManagedClusterSpec{ + ProjectID: projectID, + }, + Status: v1alpha1.ScalewayManagedClusterStatus{ + Ready: true, + Network: &v1alpha1.ManagedNetworkStatus{ + PrivateNetworkID: scw.StringPtr("11111111-1111-1111-1111-111111111111"), + }, + }, + }, + 
ManagedControlPlane: &v1alpha1.ScalewayManagedControlPlane{ + ObjectMeta: v1.ObjectMeta{ + Name: "controlplane", + Namespace: "default", + }, + Spec: v1alpha1.ScalewayManagedControlPlaneSpec{ + Type: "kapsule", + Version: "v1.31.1", + CNI: scw.StringPtr("cilium"), + }, + }, + }, + }, + args: args{ + ctx: context.TODO(), + }, + objects: []runtime.Object{}, + expect: func(i *mock_client.MockInterfaceMockRecorder) { + i.FindCluster(gomock.Any(), "default-controlplane").Return(nil, client.ErrNoItemFound) + i.CreateCluster( + gomock.Any(), + "default-controlplane", + "kapsule", + "1.31.1", + scw.StringPtr(privateNetworkID), + []string{"caps-namespace=default", "caps-scalewaymanagedcontrolplane=controlplane"}, + nil, + nil, + nil, + k8s.CNICilium, + &k8s.CreateClusterRequestAutoscalerConfig{ + ScaleDownDisabled: scw.BoolPtr(false), + ScaleDownDelayAfterAdd: scw.StringPtr("10m"), + Estimator: k8s.AutoscalerEstimatorBinpacking, + Expander: k8s.AutoscalerExpanderRandom, + IgnoreDaemonsetsUtilization: scw.BoolPtr(false), + BalanceSimilarNodeGroups: scw.BoolPtr(false), + ExpendablePodsPriorityCutoff: scw.Int32Ptr(-10), + ScaleDownUnneededTime: scw.StringPtr("10m"), + ScaleDownUtilizationThreshold: scw.Float32Ptr(0.5), + MaxGracefulTerminationSec: scw.Uint32Ptr(600), + }, + &k8s.CreateClusterRequestAutoUpgrade{ + Enable: false, + MaintenanceWindow: &k8s.MaintenanceWindow{ + StartHour: 0, + Day: k8s.MaintenanceWindowDayOfTheWeekAny, + }, + }, + &k8s.CreateClusterRequestOpenIDConnectConfig{ + UsernameClaim: scw.StringPtr(""), + UsernamePrefix: scw.StringPtr(""), + GroupsPrefix: scw.StringPtr(""), + GroupsClaim: &[]string{}, + RequiredClaim: &[]string{}, + }, + scw.IPNet{}, + scw.IPNet{}, + ).Return(&k8s.Cluster{ + ID: clusterID, + Status: k8s.ClusterStatusReady, + Type: "kapsule", + Version: "1.31.1", + Tags: []string{"caps-namespace=default", "caps-scalewaymanagedcontrolplane=controlplane", "created-by=cluster-api-provider-scaleway"}, + Cni: k8s.CNICilium, + ClusterURL: fmt.Sprintf("https://%s.api.k8s.fr-par.scw.cloud:6443", clusterID), + AutoscalerConfig: &k8s.ClusterAutoscalerConfig{ + ScaleDownDisabled: false, + ScaleDownDelayAfterAdd: "10m", + Estimator: k8s.AutoscalerEstimatorBinpacking, + Expander: k8s.AutoscalerExpanderRandom, + IgnoreDaemonsetsUtilization: false, + BalanceSimilarNodeGroups: false, + ExpendablePodsPriorityCutoff: -10, + ScaleDownUnneededTime: "10m", + ScaleDownUtilizationThreshold: 0.5, + MaxGracefulTerminationSec: 600, + }, + AutoUpgrade: &k8s.ClusterAutoUpgrade{ + Enabled: false, + MaintenanceWindow: &k8s.MaintenanceWindow{ + StartHour: 0, + Day: k8s.MaintenanceWindowDayOfTheWeekAny, + }, + }, + OpenIDConnectConfig: &k8s.ClusterOpenIDConnectConfig{}, + }, nil) + i.ListClusterACLRules(gomock.Any(), clusterID).Return([]*k8s.ACLRule{ + {IP: &scw.IPNet{IPNet: *ipNet0}}, + }, nil) + i.GetClusterKubeConfig(gomock.Any(), clusterID).Return(&k8s.Kubeconfig{ + Clusters: []*k8s.KubeconfigClusterWithName{ + { + Name: "default-controlplane", + Cluster: k8s.KubeconfigCluster{ + CertificateAuthorityData: "fake", + }, + }, + }, + }, nil) + i.GetSecretKey().Return("secret-key") + }, + asserts: func(g *WithT, s *scope.ManagedControlPlane) { + g.Expect(s.ManagedControlPlane.Spec.ClusterName).To(HaveValue(Equal("default-controlplane"))) + g.Expect(s.ManagedControlPlane.Status.Version).To(HaveValue(Equal("v1.31.1"))) + g.Expect(s.ManagedControlPlane.Spec.ControlPlaneEndpoint.Host).To(Equal(fmt.Sprintf("%s.api.k8s.fr-par.scw.cloud", clusterID))) + 
g.Expect(s.ManagedControlPlane.Spec.ControlPlaneEndpoint.Port).To(BeEquivalentTo(6443)) + + kubeconfig := &corev1.Secret{} + g.Expect(s.Client.Get(context.TODO(), types.NamespacedName{ + Namespace: "default", + Name: "cluster-kubeconfig", + }, kubeconfig)).To(Succeed()) + g.Expect(kubeconfig.Data).To(HaveKey("value")) + + kubeconfig = &corev1.Secret{} + g.Expect(s.Client.Get(context.TODO(), types.NamespacedName{ + Namespace: "default", + Name: "cluster-user-kubeconfig", + }, kubeconfig)).To(Succeed()) + g.Expect(kubeconfig.Data).To(HaveKey("value")) + }, + }, + { + name: "control-plane is already created and up-to-date", + fields: fields{ + ManagedControlPlane: &scope.ManagedControlPlane{ + Cluster: &v1beta1.Cluster{ + Spec: v1beta1.ClusterSpec{}, + }, + ManagedCluster: &v1alpha1.ScalewayManagedCluster{ + ObjectMeta: v1.ObjectMeta{ + Name: "managedcluster", + Namespace: "default", + }, + Status: v1alpha1.ScalewayManagedClusterStatus{ + Ready: true, + Network: &v1alpha1.ManagedNetworkStatus{ + PrivateNetworkID: scw.StringPtr("11111111-1111-1111-1111-111111111111"), + }, + }, + }, + ManagedControlPlane: &v1alpha1.ScalewayManagedControlPlane{ + ObjectMeta: v1.ObjectMeta{ + Name: "controlplane", + Namespace: "default", + }, + Spec: v1alpha1.ScalewayManagedControlPlaneSpec{ + Type: "kapsule", + Version: "v1.31.1", + CNI: scw.StringPtr("cilium"), + ClusterName: scw.StringPtr("default-controlplane"), + ControlPlaneEndpoint: v1beta1.APIEndpoint{ + Host: fmt.Sprintf("%s.api.k8s.fr-par.scw.cloud", clusterID), + Port: 6443, + }, + }, + Status: v1alpha1.ScalewayManagedControlPlaneStatus{ + Ready: true, + Initialized: true, + ExternalManagedControlPlane: true, + Version: scw.StringPtr("v1.31.1"), + }, + }, + }, + }, + args: args{ + ctx: context.TODO(), + }, + objects: []runtime.Object{}, + expect: func(i *mock_client.MockInterfaceMockRecorder) { + i.FindCluster(gomock.Any(), "default-controlplane").Return(&k8s.Cluster{ + ID: clusterID, + Status: k8s.ClusterStatusReady, + Type: "kapsule", + Version: "1.31.1", + Tags: []string{"caps-namespace=default", "caps-scalewaymanagedcontrolplane=controlplane", "created-by=cluster-api-provider-scaleway"}, + Cni: k8s.CNICilium, + ClusterURL: fmt.Sprintf("https://%s.api.k8s.fr-par.scw.cloud:6443", clusterID), + AutoscalerConfig: &k8s.ClusterAutoscalerConfig{ + ScaleDownDisabled: false, + ScaleDownDelayAfterAdd: "10m", + Estimator: k8s.AutoscalerEstimatorBinpacking, + Expander: k8s.AutoscalerExpanderRandom, + IgnoreDaemonsetsUtilization: false, + BalanceSimilarNodeGroups: false, + ExpendablePodsPriorityCutoff: -10, + ScaleDownUnneededTime: "10m", + ScaleDownUtilizationThreshold: 0.5, + MaxGracefulTerminationSec: 600, + }, + AutoUpgrade: &k8s.ClusterAutoUpgrade{ + Enabled: false, + MaintenanceWindow: &k8s.MaintenanceWindow{ + StartHour: 0, + Day: k8s.MaintenanceWindowDayOfTheWeekAny, + }, + }, + OpenIDConnectConfig: &k8s.ClusterOpenIDConnectConfig{}, + }, nil) + i.ListClusterACLRules(gomock.Any(), clusterID).Return([]*k8s.ACLRule{ + {IP: &scw.IPNet{IPNet: *ipNet0}}, + }, nil) + i.GetClusterKubeConfig(gomock.Any(), clusterID).Return(&k8s.Kubeconfig{ + Clusters: []*k8s.KubeconfigClusterWithName{ + { + Name: "default-controlplane", + Cluster: k8s.KubeconfigCluster{ + CertificateAuthorityData: "fake", + }, + }, + }, + }, nil) + i.GetSecretKey().Return("secret-key") + }, + asserts: func(g *WithT, s *scope.ManagedControlPlane) {}, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + g := NewWithT(t) + + mockCtrl := 
gomock.NewController(t) + defer mockCtrl.Finish() + + scwMock := mock_client.NewMockInterface(mockCtrl) + + tt.expect(scwMock.EXPECT()) + s := &Service{ + ManagedControlPlane: tt.fields.ManagedControlPlane, + } + s.Client = fake.NewFakeClient(tt.objects...) + s.ScalewayClient = scwMock + if err := s.Reconcile(tt.args.ctx); (err != nil) != tt.wantErr { + t.Errorf("Service.Reconcile() error = %v, wantErr %v", err, tt.wantErr) + } + + tt.asserts(g, s.ManagedControlPlane) + }) + } +} + +func TestService_Delete(t *testing.T) { + t.Parallel() + type fields struct { + ManagedControlPlane *scope.ManagedControlPlane + } + type args struct { + ctx context.Context + } + tests := []struct { + name string + fields fields + args args + wantErr bool + expect func(i *mock_client.MockInterfaceMockRecorder) + }{ + { + name: "delete cluster", + fields: fields{ + ManagedControlPlane: &scope.ManagedControlPlane{ + ManagedControlPlane: &v1alpha1.ScalewayManagedControlPlane{ + ObjectMeta: v1.ObjectMeta{ + Name: "controlplane", + Namespace: "default", + }, + Spec: v1alpha1.ScalewayManagedControlPlaneSpec{ + Type: "kapsule", + Version: "v1.31.1", + CNI: scw.StringPtr("cilium"), + ClusterName: scw.StringPtr("default-controlplane"), + ControlPlaneEndpoint: v1beta1.APIEndpoint{ + Host: fmt.Sprintf("%s.api.k8s.fr-par.scw.cloud", clusterID), + Port: 6443, + }, + }, + Status: v1alpha1.ScalewayManagedControlPlaneStatus{ + Ready: true, + Initialized: true, + ExternalManagedControlPlane: true, + Version: scw.StringPtr("v1.31.1"), + }, + }, + }, + }, + args: args{ + ctx: context.TODO(), + }, + expect: func(i *mock_client.MockInterfaceMockRecorder) { + i.FindCluster(gomock.Any(), "default-controlplane").Return(&k8s.Cluster{ + ID: clusterID, + }, nil) + i.DeleteCluster(gomock.Any(), clusterID, false) + }, + }, + { + name: "delete cluster with additional resources", + fields: fields{ + ManagedControlPlane: &scope.ManagedControlPlane{ + ManagedControlPlane: &v1alpha1.ScalewayManagedControlPlane{ + ObjectMeta: v1.ObjectMeta{ + Name: "controlplane", + Namespace: "default", + }, + Spec: v1alpha1.ScalewayManagedControlPlaneSpec{ + Type: "kapsule", + Version: "v1.31.1", + CNI: scw.StringPtr("cilium"), + ClusterName: scw.StringPtr("default-controlplane"), + ControlPlaneEndpoint: v1beta1.APIEndpoint{ + Host: fmt.Sprintf("%s.api.k8s.fr-par.scw.cloud", clusterID), + Port: 6443, + }, + OnDelete: &v1alpha1.OnDeleteSpec{ + WithAdditionalResources: scw.BoolPtr(true), + }, + }, + Status: v1alpha1.ScalewayManagedControlPlaneStatus{ + Ready: true, + Initialized: true, + ExternalManagedControlPlane: true, + Version: scw.StringPtr("v1.31.1"), + }, + }, + }, + }, + args: args{ + ctx: context.TODO(), + }, + expect: func(i *mock_client.MockInterfaceMockRecorder) { + i.FindCluster(gomock.Any(), "default-controlplane").Return(&k8s.Cluster{ + ID: clusterID, + }, nil) + i.DeleteCluster(gomock.Any(), clusterID, true).Return(nil) + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + mockCtrl := gomock.NewController(t) + defer mockCtrl.Finish() + + scwMock := mock_client.NewMockInterface(mockCtrl) + + tt.expect(scwMock.EXPECT()) + s := &Service{ + ManagedControlPlane: tt.fields.ManagedControlPlane, + } + s.ScalewayClient = scwMock + if err := s.Delete(tt.args.ctx); (err != nil) != tt.wantErr { + t.Errorf("Service.Delete() error = %v, wantErr %v", err, tt.wantErr) + } + }) + } +} diff --git a/internal/service/scaleway/k8s/cluster/kubeconfig.go b/internal/service/scaleway/k8s/cluster/kubeconfig.go new file mode 
100644
index 0000000..28beecd
--- /dev/null
+++ b/internal/service/scaleway/k8s/cluster/kubeconfig.go
@@ -0,0 +1,216 @@
+package cluster
+
+import (
+    "context"
+    "encoding/base64"
+    "fmt"
+
+    infrav1 "github.com/scaleway/cluster-api-provider-scaleway/api/v1alpha1"
+    "github.com/scaleway/scaleway-sdk-go/api/k8s/v1"
+    corev1 "k8s.io/api/core/v1"
+    apierrors "k8s.io/apimachinery/pkg/api/errors"
+    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+    "k8s.io/apimachinery/pkg/types"
+    "k8s.io/client-go/tools/clientcmd"
+    "k8s.io/client-go/tools/clientcmd/api"
+    "sigs.k8s.io/cluster-api/util/kubeconfig"
+    "sigs.k8s.io/cluster-api/util/secret"
+)
+
+func (s *Service) reconcileKubeconfig(ctx context.Context, cluster *k8s.Cluster, getKubeconfig kubeconfigGetter) error {
+    clusterRef := types.NamespacedName{
+        Name: s.Cluster.Name,
+        Namespace: s.Cluster.Namespace,
+    }
+
+    configSecret, err := secret.GetFromNamespacedName(ctx, s.Client, clusterRef, secret.Kubeconfig)
+    if err != nil {
+        if !apierrors.IsNotFound(err) {
+            return fmt.Errorf("getting kubeconfig secret %s: %w", clusterRef, err)
+        }
+
+        if createErr := s.createCAPIKubeconfigSecret(ctx, cluster, getKubeconfig, &clusterRef); createErr != nil {
+            return fmt.Errorf("creating kubeconfig secret: %w", createErr)
+        }
+    } else if updateErr := s.updateCAPIKubeconfigSecret(ctx, configSecret); updateErr != nil {
+        return fmt.Errorf("updating kubeconfig secret: %w", updateErr)
+    }
+
+    return nil
+}
+
+func (s *Service) reconcileAdditionalKubeconfigs(ctx context.Context, cluster *k8s.Cluster, getKubeconfig kubeconfigGetter) error {
+    clusterRef := types.NamespacedName{
+        Name: s.Cluster.Name + "-user",
+        Namespace: s.Cluster.Namespace,
+    }
+
+    // Create the additional kubeconfig for users. This doesn't need updating on every sync.
+    if _, err := secret.GetFromNamespacedName(ctx, s.Client, clusterRef, secret.Kubeconfig); err != nil {
+        if !apierrors.IsNotFound(err) {
+            return fmt.Errorf("getting kubeconfig (user) secret %s: %w", clusterRef, err)
+        }
+
+        createErr := s.createUserKubeconfigSecret(
+            ctx,
+            cluster,
+            getKubeconfig,
+            &clusterRef,
+        )
+        if createErr != nil {
+            return fmt.Errorf("creating additional kubeconfig secret: %w", createErr)
+        }
+    }
+
+    return nil
+}
+
+func (s *Service) createUserKubeconfigSecret(ctx context.Context, cluster *k8s.Cluster, getKubeconfig kubeconfigGetter, clusterRef *types.NamespacedName) error {
+    controllerOwnerRef := *metav1.NewControllerRef(s.ManagedControlPlane.ManagedControlPlane, infrav1.GroupVersion.WithKind("ScalewayManagedControlPlane"))
+
+    contextName := s.getKubeConfigContextName(false)
+
+    kc, err := getKubeconfig()
+    if err != nil {
+        return err
+    }
+
+    cfg, err := s.createBaseKubeConfig(contextName, cluster, kc)
+    if err != nil {
+        return fmt.Errorf("creating base kubeconfig: %w", err)
+    }
+
+    execConfig := &api.ExecConfig{
+        APIVersion: "client.authentication.k8s.io/v1",
+        Command: "scw",
+        Args: []string{"k8s", "exec-credential"},
+        InteractiveMode: api.NeverExecInteractiveMode,
+        InstallHint: "Install scaleway CLI for use with kubectl by following\n https://cli.scaleway.com/#installation",
+    }
+    cfg.AuthInfos = map[string]*api.AuthInfo{
+        contextName: {
+            Exec: execConfig,
+        },
+    }
+
+    out, err := clientcmd.Write(*cfg)
+    if err != nil {
+        return fmt.Errorf("serialize kubeconfig to yaml: %w", err)
+    }
+
+    kubeconfigSecret := kubeconfig.GenerateSecretWithOwner(*clusterRef, out, controllerOwnerRef)
+    if err := s.Client.Create(ctx, kubeconfigSecret); err != nil {
+        return fmt.Errorf("creating secret: %w", err)
+    }
+
return nil +} + +func (s *Service) createCAPIKubeconfigSecret(ctx context.Context, cluster *k8s.Cluster, getKubeconfig kubeconfigGetter, clusterRef *types.NamespacedName) error { + controllerOwnerRef := *metav1.NewControllerRef(s.ManagedControlPlane.ManagedControlPlane, infrav1.GroupVersion.WithKind("ScalewayManagedControlPlane")) + + contextName := s.getKubeConfigContextName(false) + + kc, err := getKubeconfig() + if err != nil { + return err + } + + cfg, err := s.createBaseKubeConfig(contextName, cluster, kc) + if err != nil { + return fmt.Errorf("creating base kubeconfig: %w", err) + } + + cfg.AuthInfos = map[string]*api.AuthInfo{ + contextName: { + Token: s.ScalewayClient.GetSecretKey(), + }, + } + + out, err := clientcmd.Write(*cfg) + if err != nil { + return fmt.Errorf("serialize kubeconfig to yaml: %w", err) + } + + kubeconfigSecret := kubeconfig.GenerateSecretWithOwner(*clusterRef, out, controllerOwnerRef) + if err := s.Client.Create(ctx, kubeconfigSecret); err != nil { + return fmt.Errorf("creating secret: %w", err) + } + + return nil +} + +func (s *Service) updateCAPIKubeconfigSecret(ctx context.Context, configSecret *corev1.Secret) error { + data, ok := configSecret.Data[secret.KubeconfigDataName] + if !ok { + return fmt.Errorf("missing key %q in secret data", secret.KubeconfigDataName) + } + + config, err := clientcmd.Load(data) + if err != nil { + return fmt.Errorf("failed to convert kubeconfig Secret into a clientcmdapi.Config: %w", err) + } + + contextName := s.getKubeConfigContextName(false) + + if config.AuthInfos[contextName] == nil { + return nil + } + + if config.AuthInfos[contextName].Token == s.ScalewayClient.GetSecretKey() { + return nil + } + + config.AuthInfos[contextName].Token = s.ScalewayClient.GetSecretKey() + + out, err := clientcmd.Write(*config) + if err != nil { + return fmt.Errorf("failed to serialize config to yaml: %w", err) + } + + configSecret.Data[secret.KubeconfigDataName] = out + + if err := s.Client.Update(ctx, configSecret); err != nil { + return fmt.Errorf("updating kubeconfig secret: %w", err) + } + + return nil +} + +func (s *Service) createBaseKubeConfig(contextName string, cluster *k8s.Cluster, kc *k8s.Kubeconfig) (*api.Config, error) { + b64CACert, err := kc.GetCertificateAuthorityData() + if err != nil { + return nil, err + } + certData, err := base64.StdEncoding.DecodeString(b64CACert) + if err != nil { + return nil, fmt.Errorf("decoding cluster CA cert: %w", err) + } + + cfg := &api.Config{ + APIVersion: api.SchemeGroupVersion.Version, + Clusters: map[string]*api.Cluster{ + contextName: { + Server: s.ClusterEndpoint(cluster), + CertificateAuthorityData: certData, + }, + }, + Contexts: map[string]*api.Context{ + contextName: { + Cluster: contextName, + AuthInfo: contextName, + }, + }, + CurrentContext: contextName, + } + + return cfg, nil +} + +func (s *Service) getKubeConfigContextName(isUser bool) string { + contextName := fmt.Sprintf("scw_%s_%s_%s", s.ManagedCluster.Spec.ProjectID, s.ManagedCluster.Spec.Region, s.ClusterName()) + if isUser { + contextName += "-user" + } + return contextName +} diff --git a/internal/service/scaleway/k8s/pool/pool.go b/internal/service/scaleway/k8s/pool/pool.go new file mode 100644 index 0000000..6ab5112 --- /dev/null +++ b/internal/service/scaleway/k8s/pool/pool.go @@ -0,0 +1,267 @@ +package pool + +import ( + "context" + "errors" + "fmt" + "maps" + "slices" + "time" + + "github.com/scaleway/cluster-api-provider-scaleway/internal/scope" + 
"github.com/scaleway/cluster-api-provider-scaleway/internal/service/scaleway" + "github.com/scaleway/cluster-api-provider-scaleway/internal/service/scaleway/client" + "github.com/scaleway/cluster-api-provider-scaleway/internal/service/scaleway/common" + "github.com/scaleway/scaleway-sdk-go/api/k8s/v1" + "github.com/scaleway/scaleway-sdk-go/scw" + utilerrors "k8s.io/apimachinery/pkg/util/errors" +) + +const poolRetryTime = 30 * time.Second + +type Service struct { + *scope.ManagedMachinePool +} + +func New(s *scope.ManagedMachinePool) *Service { + return &Service{s} +} + +func (s Service) Name() string { + return "k8s_pool" +} + +func (s *Service) Delete(ctx context.Context) error { + clusterName, ok := s.ClusterName() + if !ok { + return nil + } + + cluster, err := s.ScalewayClient.FindCluster(ctx, clusterName) + if err != nil { + if client.IsNotFoundError(err) { + return nil + } + + return err + } + + pool, err := s.ScalewayClient.FindPool(ctx, cluster.ID, s.ResourceName()) + if err != nil { + if client.IsNotFoundError(err) { + return nil + } + + return err + } + + if pool.Status != k8s.PoolStatusDeleting { + if err := s.ScalewayClient.DeletePool(ctx, pool.ID); err != nil { + return err + } + } + + return scaleway.WithTransientError(errors.New("pool is being deleted"), poolRetryTime) +} + +func (s *Service) Reconcile(ctx context.Context) error { + clusterName, ok := s.ClusterName() + if !ok { + return scaleway.WithTransientError(errors.New("cluster name not set"), poolRetryTime) + } + + cluster, err := s.ScalewayClient.FindCluster(ctx, clusterName) + if err != nil { + if client.IsNotFoundError(err) { + return scaleway.WithTransientError(errors.New("cluster does not exist yet"), poolRetryTime) + } + return err + } + + if !slices.Contains([]k8s.ClusterStatus{ + k8s.ClusterStatusReady, + k8s.ClusterStatusPoolRequired, + }, cluster.Status) { + return scaleway.WithTransientError(fmt.Errorf("cluster %s is not yet ready: currently %s", cluster.ID, cluster.Status), poolRetryTime) + } + + pool, err := s.getOrCreatePool(ctx, cluster) + if err != nil { + return err + } + + if pool.Status != k8s.PoolStatusReady { + return scaleway.WithTransientError(fmt.Errorf("pool %s is not yet ready: currently %s", pool.ID, pool.Status), poolRetryTime) + } + + // Reconcile pool version. + if desiredVersion := s.DesiredVersion(); desiredVersion != nil { + poolUpToDate, err := common.IsUpToDate(pool.Version, *desiredVersion) + if err != nil { + return err + } + if !poolUpToDate { + if err := s.ScalewayClient.UpgradePool(ctx, pool.ID, *desiredVersion); err != nil { + return err + } + + return scaleway.WithTransientError(fmt.Errorf("pool %s is upgrading to %s", cluster.ID, *desiredVersion), poolRetryTime) + } + } + + // Reconcile pools changes (size, tags, etc.). 
+ updated, err := s.updatePool(ctx, pool) + if err != nil { + return err + } + if updated { + return scaleway.WithTransientError(fmt.Errorf("pool %s is being updated", cluster.ID), poolRetryTime) + } + + nodes, err := s.ScalewayClient.ListNodes(ctx, cluster.ID, pool.ID) + if err != nil { + return err + } + + s.SetProviderIDs(nodes) + s.SetStatusReplicas(pool.Size) + + return nil +} + +func (s *Service) getOrCreatePool(ctx context.Context, cluster *k8s.Cluster) (*k8s.Pool, error) { + pool, err := s.ScalewayClient.FindPool(ctx, cluster.ID, s.ResourceName()) + if err := utilerrors.FilterOut(err, client.IsNotFoundError); err != nil { + return nil, err + } + + if pool == nil { + mmp := s.ManagedMachinePool.ManagedMachinePool + + autoscaling, size, min, max := s.Scaling() + pup := s.DesiredPoolUpgradePolicy() + + pool, err = s.ScalewayClient.CreatePool( + ctx, + scw.Zone(mmp.Spec.Zone), + cluster.ID, + s.ResourceName(), + mmp.Spec.NodeType, + mmp.Spec.PlacementGroupID, + mmp.Spec.SecurityGroupID, + autoscaling, + s.Autohealing(), + s.PublicIPDisabled(), + size, + &min, + &max, + s.DesiredTags(), + mmp.Spec.KubeletArgs, + s.RootVolumeType(), + s.RootVolumeSizeGB(), + &k8s.CreatePoolRequestUpgradePolicy{ + MaxUnavailable: &pup.MaxUnavailable, + MaxSurge: &pup.MaxSurge, + }, + ) + if err != nil { + return nil, err + } + } + + return pool, nil +} + +func (s *Service) updatePool(ctx context.Context, pool *k8s.Pool) (bool, error) { + updateNeeded := false + + var autohealing *bool + if pool.Autohealing != s.Autohealing() { + updateNeeded = true + autohealing = scw.BoolPtr(s.Autohealing()) + } + + var autoscaling *bool + var size, minSize, maxSize *uint32 + + if pool.NodeType != "external" { + desiredAutoscaling, desiredSize, desiredMin, desiredMax := s.Scaling() + + if pool.Autoscaling != desiredAutoscaling { + updateNeeded = true + autoscaling = &desiredAutoscaling + } + + // Only reconcile minSize and maxSize when autoscaling is enabled. + if desiredAutoscaling { + if pool.MinSize != desiredMin { + updateNeeded = true + minSize = &desiredMin + } + + if pool.MaxSize != desiredMax { + updateNeeded = true + maxSize = &desiredMax + } + } else { + // Only reconcile size when autoscaling is disabled. 
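+ // (With autoscaling enabled, only minSize/maxSize are reconciled above; the current size is left to the autoscaler.)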
+ if pool.Size != desiredSize { + updateNeeded = true + size = &desiredSize + } + } + } + + var tags *[]string + if !common.SlicesEqualIgnoreOrder(client.TagsWithoutCreatedBy(pool.Tags), s.DesiredTags()) { + updateNeeded = true + tags = scw.StringsPtr(s.DesiredTags()) + } + + var kubeletArgs *map[string]string + if !maps.Equal(pool.KubeletArgs, s.ManagedMachinePool.ManagedMachinePool.Spec.KubeletArgs) { + updateNeeded = true + kubeletArgs = &s.ManagedMachinePool.ManagedMachinePool.Spec.KubeletArgs + if *kubeletArgs == nil { + kubeletArgs = &map[string]string{} + } + } + + var upgradePolicy *k8s.UpdatePoolRequestUpgradePolicy + desiredPoolUpgradePolicy := s.DesiredPoolUpgradePolicy() + if !poolUpgradePolicyMatchesDesired(pool.UpgradePolicy, desiredPoolUpgradePolicy) { + updateNeeded = true + + upgradePolicy = &k8s.UpdatePoolRequestUpgradePolicy{ + MaxUnavailable: &desiredPoolUpgradePolicy.MaxUnavailable, + MaxSurge: &desiredPoolUpgradePolicy.MaxSurge, + } + } + + if !updateNeeded { + return false, nil + } + + if err := s.ScalewayClient.UpdatePool( + ctx, + pool.ID, + autoscaling, autohealing, + size, minSize, maxSize, + tags, + kubeletArgs, + upgradePolicy, + ); err != nil { + return false, fmt.Errorf("failed to update pool: %w", err) + } + + return true, nil +} + +func poolUpgradePolicyMatchesDesired(current, desired *k8s.PoolUpgradePolicy) bool { + if current == nil || desired == nil { + return true + } + + return current.MaxSurge == desired.MaxSurge && + current.MaxUnavailable == desired.MaxUnavailable +} diff --git a/internal/service/scaleway/k8s/pool/pool_test.go b/internal/service/scaleway/k8s/pool/pool_test.go new file mode 100644 index 0000000..de15043 --- /dev/null +++ b/internal/service/scaleway/k8s/pool/pool_test.go @@ -0,0 +1,367 @@ +package pool + +import ( + "context" + "errors" + "testing" + + . 
"github.com/onsi/gomega" + "github.com/scaleway/cluster-api-provider-scaleway/api/v1alpha1" + "github.com/scaleway/cluster-api-provider-scaleway/internal/scope" + "github.com/scaleway/cluster-api-provider-scaleway/internal/service/scaleway" + "github.com/scaleway/cluster-api-provider-scaleway/internal/service/scaleway/client" + "github.com/scaleway/cluster-api-provider-scaleway/internal/service/scaleway/client/mock_client" + "github.com/scaleway/scaleway-sdk-go/api/k8s/v1" + "github.com/scaleway/scaleway-sdk-go/scw" + "go.uber.org/mock/gomock" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + "sigs.k8s.io/cluster-api/exp/api/v1beta1" +) + +const ( + clusterID = "11111111-1111-1111-1111-111111111111" + poolID = "11111111-1111-1111-1111-111111111111" + placementGroupID = "11111111-1111-1111-1111-111111111111" + securityGroupID = "11111111-1111-1111-1111-111111111111" +) + +func TestService_Reconcile(t *testing.T) { + t.Parallel() + type fields struct { + ManagedMachinePool *scope.ManagedMachinePool + } + type args struct { + ctx context.Context + } + tests := []struct { + name string + fields fields + args args + wantErr bool + expect func(i *mock_client.MockInterfaceMockRecorder) + asserts func(g *WithT, s *scope.ManagedMachinePool) + }{ + { + name: "creating pool", + fields: fields{ + ManagedMachinePool: &scope.ManagedMachinePool{ + ManagedControlPlane: &v1alpha1.ScalewayManagedControlPlane{ + Spec: v1alpha1.ScalewayManagedControlPlaneSpec{ + ClusterName: scw.StringPtr("default-controlplane"), + Version: "v1.30.0", + }, + }, + MachinePool: &v1beta1.MachinePool{ + Spec: v1beta1.MachinePoolSpec{ + Replicas: scw.Int32Ptr(2), + Template: clusterv1.MachineTemplateSpec{ + Spec: clusterv1.MachineSpec{ + Version: scw.StringPtr("v1.30.0"), + }, + }, + }, + }, + ManagedMachinePool: &v1alpha1.ScalewayManagedMachinePool{ + ObjectMeta: v1.ObjectMeta{ + Name: "pool", + Namespace: "default", + }, + Spec: v1alpha1.ScalewayManagedMachinePoolSpec{ + Zone: scw.ZoneFrPar1.String(), + PlacementGroupID: scw.StringPtr(placementGroupID), + NodeType: "DEV1-M", + Scaling: &v1alpha1.ScalingSpec{ + Autoscaling: scw.BoolPtr(true), + MinSize: scw.Int32Ptr(1), + MaxSize: scw.Int32Ptr(5), + }, + Autohealing: scw.BoolPtr(true), + UpgradePolicy: &v1alpha1.UpgradePolicySpec{ + MaxUnavailable: scw.Int32Ptr(0), + MaxSurge: scw.Int32Ptr(2), + }, + RootVolumeType: scw.StringPtr("sbs_15k"), + RootVolumeSizeGB: scw.Int64Ptr(42), + PublicIPDisabled: scw.BoolPtr(true), + SecurityGroupID: scw.StringPtr(securityGroupID), + AdditionalTags: []string{"tag1"}, + KubeletArgs: map[string]string{ + "containerLogMaxFiles": "500", + }, + }, + }, + }, + }, + args: args{ + ctx: context.TODO(), + }, + expect: func(i *mock_client.MockInterfaceMockRecorder) { + i.FindCluster(gomock.Any(), "default-controlplane").Return(&k8s.Cluster{ + ID: clusterID, + Status: k8s.ClusterStatusReady, + }, nil) + i.FindPool(gomock.Any(), clusterID, "pool").Return(nil, client.ErrNoItemFound) + i.CreatePool( + gomock.Any(), + scw.Zone("fr-par-1"), + clusterID, + "pool", + "DEV1-M", + scw.StringPtr(placementGroupID), + scw.StringPtr(securityGroupID), + true, + true, + true, + uint32(2), + scw.Uint32Ptr(1), + scw.Uint32Ptr(5), + []string{"caps-namespace=default", "caps-scalewaymanagedmachinepool=pool", "tag1"}, + map[string]string{ + "containerLogMaxFiles": "500", + }, + k8s.PoolVolumeType("sbs_15k"), + scw.Uint64Ptr(42), + &k8s.CreatePoolRequestUpgradePolicy{ + MaxUnavailable: scw.Uint32Ptr(0), + MaxSurge: scw.Uint32Ptr(2), 
+ }, + ).Return(&k8s.Pool{ + ID: poolID, + Status: k8s.PoolStatusReady, + Version: "1.30.0", + NodeType: "DEV1-M", + Autoscaling: true, + Autohealing: true, + PublicIPDisabled: true, + Name: "pool", + Size: 2, + MinSize: 1, + MaxSize: 5, + Tags: []string{"caps-namespace=default", "caps-scalewaymanagedmachinepool=pool", "tag1", "created-by=cluster-api-provider-scaleway"}, + PlacementGroupID: scw.StringPtr(placementGroupID), + SecurityGroupID: securityGroupID, + KubeletArgs: map[string]string{ + "containerLogMaxFiles": "500", + }, + UpgradePolicy: &k8s.PoolUpgradePolicy{ + MaxUnavailable: 0, + MaxSurge: 2, + }, + RootVolumeType: k8s.PoolVolumeTypeSbs15k, + RootVolumeSize: scw.SizePtr(42 * scw.GB), + }, nil) + i.ListNodes(gomock.Any(), clusterID, poolID).Return([]*k8s.Node{ + { + ProviderID: "providerID1", + }, + { + ProviderID: "providerID2", + }, + }, nil) + }, + asserts: func(g *WithT, s *scope.ManagedMachinePool) { + g.Expect(s.ManagedMachinePool.Spec.ProviderIDList).To(Equal([]string{ + "providerID1", "providerID2", + })) + g.Expect(s.ManagedMachinePool.Status.Replicas).To(BeEquivalentTo(2)) + }, + }, + { + name: "pool exists and is up-to-date", + fields: fields{ + ManagedMachinePool: &scope.ManagedMachinePool{ + ManagedControlPlane: &v1alpha1.ScalewayManagedControlPlane{ + Spec: v1alpha1.ScalewayManagedControlPlaneSpec{ + ClusterName: scw.StringPtr("default-controlplane"), + Version: "v1.30.0", + }, + }, + MachinePool: &v1beta1.MachinePool{ + Spec: v1beta1.MachinePoolSpec{ + Replicas: scw.Int32Ptr(2), + Template: clusterv1.MachineTemplateSpec{ + Spec: clusterv1.MachineSpec{ + Version: scw.StringPtr("v1.30.0"), + }, + }, + }, + }, + ManagedMachinePool: &v1alpha1.ScalewayManagedMachinePool{ + ObjectMeta: v1.ObjectMeta{ + Name: "pool", + Namespace: "default", + }, + Spec: v1alpha1.ScalewayManagedMachinePoolSpec{ + Zone: scw.ZoneFrPar1.String(), + PlacementGroupID: scw.StringPtr(placementGroupID), + NodeType: "DEV1-M", + Scaling: &v1alpha1.ScalingSpec{ + Autoscaling: scw.BoolPtr(true), + MinSize: scw.Int32Ptr(1), + MaxSize: scw.Int32Ptr(5), + }, + Autohealing: scw.BoolPtr(true), + UpgradePolicy: &v1alpha1.UpgradePolicySpec{ + MaxUnavailable: scw.Int32Ptr(0), + MaxSurge: scw.Int32Ptr(2), + }, + RootVolumeType: scw.StringPtr("sbs_15k"), + RootVolumeSizeGB: scw.Int64Ptr(42), + PublicIPDisabled: scw.BoolPtr(true), + SecurityGroupID: scw.StringPtr(securityGroupID), + AdditionalTags: []string{"tag1"}, + KubeletArgs: map[string]string{ + "containerLogMaxFiles": "500", + }, + }, + }, + }, + }, + args: args{ + ctx: context.TODO(), + }, + expect: func(i *mock_client.MockInterfaceMockRecorder) { + i.FindCluster(gomock.Any(), "default-controlplane").Return(&k8s.Cluster{ + ID: clusterID, + Status: k8s.ClusterStatusReady, + }, nil) + i.FindPool(gomock.Any(), clusterID, "pool").Return(&k8s.Pool{ + ID: poolID, + Status: k8s.PoolStatusReady, + Version: "1.30.0", + NodeType: "DEV1-M", + Autoscaling: true, + Autohealing: true, + PublicIPDisabled: true, + Name: "pool", + Size: 2, + MinSize: 1, + MaxSize: 5, + Tags: []string{"caps-namespace=default", "caps-scalewaymanagedmachinepool=pool", "tag1", "created-by=cluster-api-provider-scaleway"}, + PlacementGroupID: scw.StringPtr(placementGroupID), + SecurityGroupID: securityGroupID, + KubeletArgs: map[string]string{ + "containerLogMaxFiles": "500", + }, + UpgradePolicy: &k8s.PoolUpgradePolicy{ + MaxUnavailable: 0, + MaxSurge: 2, + }, + RootVolumeType: k8s.PoolVolumeTypeSbs15k, + RootVolumeSize: scw.SizePtr(42 * scw.GB), + }, nil) + i.ListNodes(gomock.Any(), 
clusterID, poolID).Return([]*k8s.Node{ + { + ProviderID: "providerID1", + }, + { + ProviderID: "providerID2", + }, + }, nil) + }, + asserts: func(g *WithT, s *scope.ManagedMachinePool) { + g.Expect(s.ManagedMachinePool.Spec.ProviderIDList).To(Equal([]string{ + "providerID1", "providerID2", + })) + g.Expect(s.ManagedMachinePool.Status.Replicas).To(BeEquivalentTo(2)) + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + g := NewWithT(t) + + mockCtrl := gomock.NewController(t) + defer mockCtrl.Finish() + + scwMock := mock_client.NewMockInterface(mockCtrl) + + tt.expect(scwMock.EXPECT()) + s := &Service{ + ManagedMachinePool: tt.fields.ManagedMachinePool, + } + s.ManagedMachinePool.ScalewayClient = scwMock + if err := s.Reconcile(tt.args.ctx); (err != nil) != tt.wantErr { + t.Errorf("Service.Reconcile() error = %v, wantErr %v", err, tt.wantErr) + } + + tt.asserts(g, s.ManagedMachinePool) + }) + } +} + +func TestService_Delete(t *testing.T) { + t.Parallel() + type fields struct { + ManagedMachinePool *scope.ManagedMachinePool + } + type args struct { + ctx context.Context + } + tests := []struct { + name string + fields fields + args args + wantErr error + expect func(i *mock_client.MockInterfaceMockRecorder) + }{ + { + name: "delete pool", + fields: fields{ + ManagedMachinePool: &scope.ManagedMachinePool{ + ManagedControlPlane: &v1alpha1.ScalewayManagedControlPlane{ + Spec: v1alpha1.ScalewayManagedControlPlaneSpec{ + ClusterName: scw.StringPtr("default-controlplane"), + Version: "v1.30.0", + }, + }, + ManagedMachinePool: &v1alpha1.ScalewayManagedMachinePool{ + ObjectMeta: v1.ObjectMeta{ + Name: "pool", + Namespace: "default", + }, + }, + }, + }, + args: args{ + ctx: context.TODO(), + }, + wantErr: scaleway.WithTransientError(errors.New("pool is being deleted"), poolRetryTime), + expect: func(i *mock_client.MockInterfaceMockRecorder) { + i.FindCluster(gomock.Any(), "default-controlplane").Return(&k8s.Cluster{ + ID: clusterID, + }, nil) + i.FindPool(gomock.Any(), clusterID, "pool").Return(&k8s.Pool{ + ID: poolID, + }, nil) + i.DeletePool(gomock.Any(), poolID).Return(nil) + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + mockCtrl := gomock.NewController(t) + defer mockCtrl.Finish() + + scwMock := mock_client.NewMockInterface(mockCtrl) + + tt.expect(scwMock.EXPECT()) + s := &Service{ + ManagedMachinePool: tt.fields.ManagedMachinePool, + } + s.ScalewayClient = scwMock + err := s.Delete(tt.args.ctx) + if (err == nil) != (tt.wantErr == nil) { + t.Errorf("Service.Reconcile() error = %v, wantErr %v", err, tt.wantErr) + } + if err != nil && tt.wantErr != nil && err.Error() != tt.wantErr.Error() { + t.Errorf("Service.Delete() error = %v, wantErr %v", err, tt.wantErr) + } + }) + } +} diff --git a/internal/service/scaleway/vpc/vpc.go b/internal/service/scaleway/vpc/vpc.go index f290b6d..1d27289 100644 --- a/internal/service/scaleway/vpc/vpc.go +++ b/internal/service/scaleway/vpc/vpc.go @@ -6,6 +6,7 @@ import ( "fmt" "time" + infrav1 "github.com/scaleway/cluster-api-provider-scaleway/api/v1alpha1" "github.com/scaleway/cluster-api-provider-scaleway/internal/scope" "github.com/scaleway/cluster-api-provider-scaleway/internal/service/scaleway" "github.com/scaleway/cluster-api-provider-scaleway/internal/service/scaleway/client" @@ -13,12 +14,21 @@ import ( utilerrors "k8s.io/apimachinery/pkg/util/errors" ) +type Scope interface { + scope.Interface + + HasPrivateNetwork() bool + IsVPCStatusSet() bool + 
SetVPCStatus(privateNetworkID, VPCID string) + PrivateNetworkParams() infrav1.PrivateNetworkParams +} + type Service struct { - *scope.Cluster + Scope } -func New(clusterScope *scope.Cluster) *Service { - return &Service{Cluster: clusterScope} +func New(s Scope) *Service { + return &Service{s} } func (s Service) Name() string { @@ -26,14 +36,21 @@ func (s Service) Name() string { } func (s *Service) Delete(ctx context.Context) error { - if !s.ShouldManagePrivateNetwork() { + if !s.HasPrivateNetwork() { return nil } - pn, err := s.ScalewayClient.FindPrivateNetwork( + params := s.PrivateNetworkParams() + + // User has provided his private network, we should not touch it. + if params.ID != nil { + return nil + } + + pn, err := s.Cloud().FindPrivateNetwork( ctx, s.ResourceTags(), - s.ScalewayCluster.Spec.Network.PrivateNetwork.VPCID, + params.VPCID, ) if err != nil { if errors.Is(err, client.ErrNoItemFound) { @@ -43,11 +60,11 @@ func (s *Service) Delete(ctx context.Context) error { return fmt.Errorf("failed to find Private Network by name: %w", err) } - if err := s.ScalewayClient.CleanAvailableIPs(ctx, pn.ID); err != nil { + if err := s.Cloud().CleanAvailableIPs(ctx, pn.ID); err != nil { return fmt.Errorf("failed to clean available IPs in IPAM: %w", err) } - if err := s.ScalewayClient.DeletePrivateNetwork(ctx, pn.ID); err != nil { + if err := s.Cloud().DeletePrivateNetwork(ctx, pn.ID); err != nil { // Sometimes, we still need to wait a little for all ressources to be removed // from the Private Network. As a result, we need to handle this error: // scaleway-sdk-go: precondition failed: resource is still in use, Private Network must be empty to be deleted @@ -66,50 +83,49 @@ func (s *Service) Reconcile(ctx context.Context) error { return nil } - if s.ScalewayCluster.Status.Network != nil && - s.ScalewayCluster.Status.Network.PrivateNetworkID != nil && - s.ScalewayCluster.Status.Network.VPCID != nil { - // If the VPC and Private Network IDs are already set in the status, we don't need to do anything. + // If the VPC and Private Network IDs are already set in the status, we don't need to do anything. 
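+ // Once both IDs are recorded, subsequent reconciles return early and skip the Private Network lookup.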
+ if s.IsVPCStatusSet() { return nil } + params := s.PrivateNetworkParams() + var err error var pn *vpc.PrivateNetwork - if s.ShouldManagePrivateNetwork() { - pn, err = s.getOrCreatePN(ctx) + if pnID := params.ID; pnID != nil { + pn, err = s.Cloud().GetPrivateNetwork(ctx, *pnID) if err != nil { - return fmt.Errorf("failed to get or create Private Network: %w", err) + return fmt.Errorf("failed to get existing Private Network: %w", err) } } else { - pn, err = s.ScalewayClient.GetPrivateNetwork(ctx, *s.ScalewayCluster.Spec.Network.PrivateNetwork.ID) + pn, err = s.getOrCreatePN(ctx, params) if err != nil { - return fmt.Errorf("failed to get existing Private Network: %w", err) + return fmt.Errorf("failed to get or create Private Network: %w", err) } } - s.SetStatusPrivateNetworkID(pn.ID) - s.SetStatusVPCID(pn.VpcID) + s.SetVPCStatus(pn.ID, pn.VpcID) return nil } -func (s *Service) getOrCreatePN(ctx context.Context) (*vpc.PrivateNetwork, error) { - pn, err := s.ScalewayClient.FindPrivateNetwork( +func (s *Service) getOrCreatePN(ctx context.Context, params infrav1.PrivateNetworkParams) (*vpc.PrivateNetwork, error) { + pn, err := s.Cloud().FindPrivateNetwork( ctx, s.ResourceTags(), - s.ScalewayCluster.Spec.Network.PrivateNetwork.VPCID, + params.VPCID, ) if err := utilerrors.FilterOut(err, client.IsNotFoundError); err != nil { return nil, err } if pn == nil { - pn, err = s.ScalewayClient.CreatePrivateNetwork( + pn, err = s.Cloud().CreatePrivateNetwork( ctx, s.ResourceName(), - s.ScalewayCluster.Spec.Network.PrivateNetwork.VPCID, - s.ScalewayCluster.Spec.Network.PrivateNetwork.Subnet, + params.VPCID, + params.Subnet, s.ResourceTags(), ) if err != nil { diff --git a/internal/service/scaleway/vpc/vpc_test.go b/internal/service/scaleway/vpc/vpc_test.go index 371b61d..dbfa4d2 100644 --- a/internal/service/scaleway/vpc/vpc_test.go +++ b/internal/service/scaleway/vpc/vpc_test.go @@ -23,7 +23,7 @@ const ( func TestService_Reconcile(t *testing.T) { t.Parallel() type fields struct { - Cluster *scope.Cluster + Scope } type args struct { ctx context.Context @@ -34,12 +34,12 @@ func TestService_Reconcile(t *testing.T) { args args wantErr bool expect func(i *mock_client.MockInterfaceMockRecorder) - asserts func(g *WithT, c *scope.Cluster) + asserts func(g *WithT, s Scope) }{ { name: "no private network", fields: fields{ - Cluster: &scope.Cluster{ + Scope: &scope.Cluster{ ScalewayCluster: &v1alpha1.ScalewayCluster{}, }, }, @@ -47,12 +47,12 @@ func TestService_Reconcile(t *testing.T) { ctx: context.TODO(), }, expect: func(i *mock_client.MockInterfaceMockRecorder) {}, - asserts: func(g *WithT, c *scope.Cluster) {}, + asserts: func(g *WithT, c Scope) {}, }, { name: "IDs already set in status", fields: fields{ - Cluster: &scope.Cluster{ + Scope: &scope.Cluster{ ScalewayCluster: &v1alpha1.ScalewayCluster{ Spec: v1alpha1.ScalewayClusterSpec{ Network: &v1alpha1.NetworkSpec{ @@ -74,12 +74,12 @@ func TestService_Reconcile(t *testing.T) { ctx: context.TODO(), }, expect: func(i *mock_client.MockInterfaceMockRecorder) {}, - asserts: func(g *WithT, c *scope.Cluster) {}, + asserts: func(g *WithT, s Scope) {}, }, { name: "managed private network", fields: fields{ - Cluster: &scope.Cluster{ + Scope: &scope.Cluster{ ScalewayCluster: &v1alpha1.ScalewayCluster{ ObjectMeta: v1.ObjectMeta{ Name: "cluster", @@ -111,16 +111,18 @@ func TestService_Reconcile(t *testing.T) { DHCPEnabled: true, }, nil) }, - asserts: func(g *WithT, c *scope.Cluster) { - g.Expect(c.ScalewayCluster.Status.Network).NotTo(BeNil()) - 
g.Expect(c.ScalewayCluster.Status.Network.PrivateNetworkID).To(Equal(scw.StringPtr(privateNetworkID))) - g.Expect(c.ScalewayCluster.Status.Network.VPCID).To(Equal(scw.StringPtr(vpcID))) + asserts: func(g *WithT, s Scope) { + clusterScope, ok := s.(*scope.Cluster) + g.Expect(ok).To(BeTrue()) + g.Expect(clusterScope.ScalewayCluster.Status.Network).NotTo(BeNil()) + g.Expect(clusterScope.ScalewayCluster.Status.Network.PrivateNetworkID).To(Equal(scw.StringPtr(privateNetworkID))) + g.Expect(clusterScope.ScalewayCluster.Status.Network.VPCID).To(Equal(scw.StringPtr(vpcID))) }, }, { name: "existing private network", fields: fields{ - Cluster: &scope.Cluster{ + Scope: &scope.Cluster{ ScalewayCluster: &v1alpha1.ScalewayCluster{ ObjectMeta: v1.ObjectMeta{ Name: "cluster", @@ -130,7 +132,9 @@ func TestService_Reconcile(t *testing.T) { Network: &v1alpha1.NetworkSpec{ PrivateNetwork: &v1alpha1.PrivateNetworkSpec{ Enabled: true, - ID: scw.StringPtr(privateNetworkID), + PrivateNetworkParams: v1alpha1.PrivateNetworkParams{ + ID: scw.StringPtr(privateNetworkID), + }, }, }, }, @@ -146,10 +150,13 @@ func TestService_Reconcile(t *testing.T) { VpcID: vpcID, }, nil) }, - asserts: func(g *WithT, c *scope.Cluster) { - g.Expect(c.ScalewayCluster.Status.Network).NotTo(BeNil()) - g.Expect(c.ScalewayCluster.Status.Network.PrivateNetworkID).To(Equal(scw.StringPtr(privateNetworkID))) - g.Expect(c.ScalewayCluster.Status.Network.VPCID).To(Equal(scw.StringPtr(vpcID))) + asserts: func(g *WithT, s Scope) { + clusterScope, ok := s.(*scope.Cluster) + g.Expect(ok).To(BeTrue()) + + g.Expect(clusterScope.ScalewayCluster.Status.Network).NotTo(BeNil()) + g.Expect(clusterScope.ScalewayCluster.Status.Network.PrivateNetworkID).To(Equal(scw.StringPtr(privateNetworkID))) + g.Expect(clusterScope.ScalewayCluster.Status.Network.VPCID).To(Equal(scw.StringPtr(vpcID))) }, }, } @@ -166,14 +173,14 @@ func TestService_Reconcile(t *testing.T) { tt.expect(scwMock.EXPECT()) s := &Service{ - Cluster: tt.fields.Cluster, + Scope: tt.fields.Scope, } - s.ScalewayClient = scwMock + s.SetCloud(scwMock) if err := s.Reconcile(tt.args.ctx); (err != nil) != tt.wantErr { t.Errorf("Service.Reconcile() error = %v, wantErr %v", err, tt.wantErr) } - tt.asserts(g, s.Cluster) + tt.asserts(g, s.Scope) }) } } @@ -181,7 +188,7 @@ func TestService_Reconcile(t *testing.T) { func TestService_Delete(t *testing.T) { t.Parallel() type fields struct { - Cluster *scope.Cluster + Scope } type args struct { ctx context.Context @@ -196,7 +203,7 @@ func TestService_Delete(t *testing.T) { { name: "no private network", fields: fields{ - Cluster: &scope.Cluster{ + Scope: &scope.Cluster{ ScalewayCluster: &v1alpha1.ScalewayCluster{}, }, }, @@ -208,7 +215,7 @@ func TestService_Delete(t *testing.T) { { name: "find and delete", fields: fields{ - Cluster: &scope.Cluster{ + Scope: &scope.Cluster{ ScalewayCluster: &v1alpha1.ScalewayCluster{ ObjectMeta: v1.ObjectMeta{ Name: "cluster", @@ -240,6 +247,33 @@ func TestService_Delete(t *testing.T) { i.DeletePrivateNetwork(gomock.Any(), privateNetworkID) }, }, + { + name: "do not remove user-provided private network", + fields: fields{ + Scope: &scope.Cluster{ + ScalewayCluster: &v1alpha1.ScalewayCluster{ + ObjectMeta: v1.ObjectMeta{ + Name: "cluster", + Namespace: "default", + }, + Spec: v1alpha1.ScalewayClusterSpec{ + Network: &v1alpha1.NetworkSpec{ + PrivateNetwork: &v1alpha1.PrivateNetworkSpec{ + Enabled: true, + PrivateNetworkParams: v1alpha1.PrivateNetworkParams{ + ID: scw.StringPtr(privateNetworkID), + }, + }, + }, + }, + }, + }, + }, + args: 
args{ + ctx: context.TODO(), + }, + expect: func(i *mock_client.MockInterfaceMockRecorder) {}, + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { @@ -253,9 +287,9 @@ func TestService_Delete(t *testing.T) { tt.expect(scwMock.EXPECT()) s := &Service{ - Cluster: tt.fields.Cluster, + Scope: tt.fields.Scope, } - s.ScalewayClient = scwMock + s.SetCloud(scwMock) if err := s.Delete(tt.args.ctx); (err != nil) != tt.wantErr { t.Errorf("Service.Delete() error = %v, wantErr %v", err, tt.wantErr) } diff --git a/internal/service/scaleway/vpcgw/vpcgw.go b/internal/service/scaleway/vpcgw/vpcgw.go index fac65fc..bdf4b85 100644 --- a/internal/service/scaleway/vpcgw/vpcgw.go +++ b/internal/service/scaleway/vpcgw/vpcgw.go @@ -21,12 +21,19 @@ import ( // Gateway deletion. const capsManagedIPTag = "caps-vpcgw-ip=managed" +type Scope interface { + scope.Interface + + HasPrivateNetwork() bool + PrivateNetworkID() (string, error) + PublicGateways() []infrav1.PublicGatewaySpec +} type Service struct { - *scope.Cluster + Scope } -func New(clusterScope *scope.Cluster) *Service { - return &Service{Cluster: clusterScope} +func New(s Scope) *Service { + return &Service{s} } func (s Service) Name() string { @@ -37,11 +44,11 @@ func (s *Service) ensureGateways(ctx context.Context, delete bool) ([]*vpcgw.Gat var desired []infrav1.PublicGatewaySpec // When delete is set, we ensure an empty list of Gateways to remove everything. if !delete { - desired = s.ScalewayCluster.Spec.Network.PublicGateways + desired = s.PublicGateways() } drle := &common.ResourceEnsurer[infrav1.PublicGatewaySpec, *vpcgw.Gateway]{ - ResourceReconciler: &desiredResourceListManager{s.Cluster, make(map[scw.Zone][]string)}, + ResourceReconciler: &desiredResourceListManager{s.Scope, make(map[scw.Zone][]string)}, } return drle.Do(ctx, desired) } @@ -61,7 +68,7 @@ func (s *Service) ensureGatewaysAttachment(ctx context.Context, gateways []*vpcg ) } - if err := s.ScalewayClient.CreateGatewayNetwork(ctx, gateway.Zone, gateway.ID, pnID); err != nil { + if err := s.Cloud().CreateGatewayNetwork(ctx, gateway.Zone, gateway.ID, pnID); err != nil { return fmt.Errorf("failed to create gateway network for gateway %s: %w", gateway.ID, err) } } @@ -105,19 +112,19 @@ func (s *Service) Delete(ctx context.Context) error { } type desiredResourceListManager struct { - *scope.Cluster + Scope gatewayTypesCache map[scw.Zone][]string } func (d *desiredResourceListManager) ListResources(ctx context.Context) ([]*vpcgw.Gateway, error) { - return d.ScalewayClient.FindGateways(ctx, d.ResourceTags()) + return d.Cloud().FindGateways(ctx, d.ResourceTags()) } func (d *desiredResourceListManager) DeleteResource(ctx context.Context, resource *vpcgw.Gateway) error { logf.FromContext(ctx).Info("Deleting Gateway", "gatewayName", resource.Name, "zone", resource.Zone) - if err := d.ScalewayClient.DeleteGateway( + if err := d.Cloud().DeleteGateway( ctx, resource.Zone, resource.ID, @@ -142,7 +149,7 @@ func (d *desiredResourceListManager) UpdateResource( if canUpgradeType { logf.FromContext(ctx).Info("Upgrading Gateway", "gatewayName", resource.Name, "zone", resource.Zone) - return d.ScalewayClient.UpgradeGateway(ctx, resource.Zone, resource.ID, *desired.Type) + return d.Cloud().UpgradeGateway(ctx, resource.Zone, resource.ID, *desired.Type) } } @@ -158,7 +165,7 @@ func (d *desiredResourceListManager) GetResourceName(resource *vpcgw.Gateway) st } func (d *desiredResourceListManager) GetDesiredZone(desired infrav1.PublicGatewaySpec) (scw.Zone, error) { - return 
d.ScalewayClient.GetZoneOrDefault(desired.Zone) + return d.Cloud().GetZoneOrDefault(desired.Zone) } func (d *desiredResourceListManager) ShouldKeepResource( @@ -208,7 +215,7 @@ func (d *desiredResourceListManager) CreateResource( tags := d.ResourceTags() if desired.IP != nil { - ip, err := d.ScalewayClient.FindGatewayIP(ctx, zone, *desired.IP) + ip, err := d.Cloud().FindGatewayIP(ctx, zone, *desired.IP) if err != nil { if client.IsNotFoundError(err) { return nil, scaleway.WithTerminalError(fmt.Errorf("failed to find gateway ip: %w", err)) @@ -228,7 +235,7 @@ func (d *desiredResourceListManager) CreateResource( logf.FromContext(ctx).Info("Creating Gateway", "gatewayName", name, "zone", zone) - gateway, err := d.ScalewayClient.CreateGateway(ctx, zone, name, gwType, tags, ipID) + gateway, err := d.Cloud().CreateGateway(ctx, zone, name, gwType, tags, ipID) if err != nil { return nil, fmt.Errorf("failed to create gateway: %w", err) } @@ -240,7 +247,7 @@ func (d *desiredResourceListManager) canUpgradeType(ctx context.Context, zone sc types, ok := d.gatewayTypesCache[zone] if !ok { var err error - types, err = d.ScalewayClient.ListGatewayTypes(ctx, zone) + types, err = d.Cloud().ListGatewayTypes(ctx, zone) if err != nil { return false, err } diff --git a/internal/service/scaleway/vpcgw/vpcgw_test.go b/internal/service/scaleway/vpcgw/vpcgw_test.go index 51454c5..1fcba54 100644 --- a/internal/service/scaleway/vpcgw/vpcgw_test.go +++ b/internal/service/scaleway/vpcgw/vpcgw_test.go @@ -104,7 +104,7 @@ func Test_canUpgradeTypes(t *testing.T) { func TestService_Reconcile(t *testing.T) { t.Parallel() type fields struct { - Cluster *scope.Cluster + Scope } type args struct { ctx context.Context @@ -119,7 +119,7 @@ func TestService_Reconcile(t *testing.T) { { name: "no private network", fields: fields{ - Cluster: &scope.Cluster{ + Scope: &scope.Cluster{ ScalewayCluster: &v1alpha1.ScalewayCluster{}, }, }, @@ -128,7 +128,7 @@ func TestService_Reconcile(t *testing.T) { { name: "no gateway configured", fields: fields{ - Cluster: &scope.Cluster{ + Scope: &scope.Cluster{ ScalewayCluster: &v1alpha1.ScalewayCluster{ ObjectMeta: v1.ObjectMeta{ Name: "cluster", @@ -157,7 +157,7 @@ func TestService_Reconcile(t *testing.T) { { name: "no gateway configured: delete existing", fields: fields{ - Cluster: &scope.Cluster{ + Scope: &scope.Cluster{ ScalewayCluster: &v1alpha1.ScalewayCluster{ ObjectMeta: v1.ObjectMeta{ Name: "cluster", @@ -191,7 +191,7 @@ func TestService_Reconcile(t *testing.T) { { name: "gateways configured: up-to-date", fields: fields{ - Cluster: &scope.Cluster{ + Scope: &scope.Cluster{ ScalewayCluster: &v1alpha1.ScalewayCluster{ ObjectMeta: v1.ObjectMeta{ Name: "cluster", @@ -259,7 +259,7 @@ func TestService_Reconcile(t *testing.T) { { name: "gateways configured: create missing", fields: fields{ - Cluster: &scope.Cluster{ + Scope: &scope.Cluster{ ScalewayCluster: &v1alpha1.ScalewayCluster{ ObjectMeta: v1.ObjectMeta{ Name: "cluster", @@ -350,7 +350,7 @@ func TestService_Reconcile(t *testing.T) { { name: "gateways configured: upgrade", fields: fields{ - Cluster: &scope.Cluster{ + Scope: &scope.Cluster{ ScalewayCluster: &v1alpha1.ScalewayCluster{ ObjectMeta: v1.ObjectMeta{ Name: "cluster", @@ -444,9 +444,9 @@ func TestService_Reconcile(t *testing.T) { tt.expect(scwMock.EXPECT()) s := &Service{ - Cluster: tt.fields.Cluster, + Scope: tt.fields.Scope, } - s.ScalewayClient = scwMock + s.SetCloud(scwMock) if err := s.Reconcile(tt.args.ctx); (err != nil) != tt.wantErr { t.Errorf("Service.Reconcile() error = %v, 
wantErr %v", err, tt.wantErr) } @@ -457,7 +457,7 @@ func TestService_Reconcile(t *testing.T) { func TestService_Delete(t *testing.T) { t.Parallel() type fields struct { - Cluster *scope.Cluster + Scope } type args struct { ctx context.Context @@ -472,7 +472,7 @@ func TestService_Delete(t *testing.T) { { name: "no private network", fields: fields{ - Cluster: &scope.Cluster{ + Scope: &scope.Cluster{ ScalewayCluster: &v1alpha1.ScalewayCluster{}, }, }, @@ -481,7 +481,7 @@ func TestService_Delete(t *testing.T) { { name: "delete gateways", fields: fields{ - Cluster: &scope.Cluster{ + Scope: &scope.Cluster{ ScalewayCluster: &v1alpha1.ScalewayCluster{ ObjectMeta: v1.ObjectMeta{ Name: "cluster", @@ -558,9 +558,9 @@ func TestService_Delete(t *testing.T) { tt.expect(scwMock.EXPECT()) s := &Service{ - Cluster: tt.fields.Cluster, + Scope: tt.fields.Scope, } - s.ScalewayClient = scwMock + s.SetCloud(scwMock) if err := s.Delete(tt.args.ctx); (err != nil) != tt.wantErr { t.Errorf("Service.Delete() error = %v, wantErr %v", err, tt.wantErr) } diff --git a/templates/cluster-template-managed.yaml b/templates/cluster-template-managed.yaml new file mode 100644 index 0000000..17e628c --- /dev/null +++ b/templates/cluster-template-managed.yaml @@ -0,0 +1,73 @@ +apiVersion: cluster.x-k8s.io/v1beta1 +kind: Cluster +metadata: + name: ${CLUSTER_NAME} +spec: + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 + kind: ScalewayManagedCluster + name: ${CLUSTER_NAME} + controlPlaneRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 + kind: ScalewayManagedControlPlane + name: ${CLUSTER_NAME}-control-plane +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 +kind: ScalewayManagedCluster +metadata: + name: ${CLUSTER_NAME} +spec: + projectID: ${SCW_PROJECT_ID} + region: ${SCW_REGION} + scalewaySecretName: ${CLUSTER_NAME} + network: + privateNetwork: + id: ${PRIVATE_NETWORK_ID:=null} + vpcID: ${VPC_ID:=null} + publicGateways: ${PUBLIC_GATEWAYS:=null} +--- +apiVersion: v1 +kind: Secret +metadata: + name: ${CLUSTER_NAME} +type: Opaque +stringData: + SCW_ACCESS_KEY: ${SCW_ACCESS_KEY} + SCW_SECRET_KEY: ${SCW_SECRET_KEY} +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 +kind: ScalewayManagedControlPlane +metadata: + name: ${CLUSTER_NAME}-control-plane +spec: + type: ${CLUSTER_TYPE:=kapsule} + version: ${KUBERNETES_VERSION} + onDelete: + withAdditionalResources: true +--- +apiVersion: cluster.x-k8s.io/v1beta1 +kind: MachinePool +metadata: + name: "${CLUSTER_NAME}-mp-0" +spec: + clusterName: "${CLUSTER_NAME}" + replicas: ${WORKER_MACHINE_COUNT} + template: + spec: + bootstrap: + dataSecretName: "" + clusterName: "${CLUSTER_NAME}" + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 + kind: ScalewayManagedMachinePool + name: "${CLUSTER_NAME}-mp-0" + version: ${KUBERNETES_VERSION} +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1 +kind: ScalewayManagedMachinePool +metadata: + name: "${CLUSTER_NAME}-mp-0" +spec: + nodeType: ${WORKER_MACHINE_COMMERCIAL_TYPE:=PLAY2-NANO} + zone: ${WORKER_FAILURE_DOMAIN:=${SCW_REGION}-1} + publicIPDisabled: ${WORKER_PUBLIC_IP_DISABLED:=false} diff --git a/test/e2e/caps.go b/test/e2e/caps.go index 63f19b5..fbff362 100644 --- a/test/e2e/caps.go +++ b/test/e2e/caps.go @@ -83,6 +83,17 @@ func CAPSClusterDeploymentSpec(inputGetter func() CAPSClusterDeploymentSpecInput AfterEach(func() { // Dumps all the resources in the spec namespace, then cleanups the cluster object and the spec namespace itself. 
- dumpSpecResourcesAndCleanup(ctx, specName, input.BootstrapClusterProxy, input.ArtifactFolder, input.ClusterctlConfigPath, namespace, cancelWatches, clusterResources.Cluster, input.E2EConfig.GetIntervals, input.SkipCleanup) + cleanInput := cleanupInput{ + SpecName: specName, + Cluster: clusterResources.Cluster, + ClusterProxy: input.BootstrapClusterProxy, + ClusterctlConfigPath: input.ClusterctlConfigPath, + Namespace: namespace, + CancelWatches: cancelWatches, + IntervalsGetter: input.E2EConfig.GetIntervals, + SkipCleanup: input.SkipCleanup, + ArtifactFolder: input.ArtifactFolder, + } + dumpSpecResourcesAndCleanup(ctx, cleanInput) }) } diff --git a/test/e2e/common.go b/test/e2e/common.go index e0672fe..f508b64 100644 --- a/test/e2e/common.go +++ b/test/e2e/common.go @@ -28,49 +28,63 @@ func setupSpecNamespace(ctx context.Context, specName string, clusterProxy frame return namespace, cancelWatches } -func dumpSpecResourcesAndCleanup(ctx context.Context, specName string, clusterProxy framework.ClusterProxy, artifactFolder, clusterctlConfigPath string, namespace *corev1.Namespace, cancelWatches context.CancelFunc, cluster *clusterv1.Cluster, intervalsGetter func(spec, key string) []any, skipCleanup bool) { - var clusterName string - var clusterNamespace string - if cluster != nil { - clusterName = cluster.Name - clusterNamespace = cluster.Namespace - Byf("Dumping logs from the %q workload cluster", clusterName) - - // Dump all the logs from the workload cluster before deleting them. - clusterProxy.CollectWorkloadClusterLogs(ctx, clusterNamespace, clusterName, filepath.Join(artifactFolder, "clusters", clusterName)) +type cleanupInput struct { + SpecName string + ClusterProxy framework.ClusterProxy + ArtifactFolder string + ClusterctlConfigPath string + Namespace *corev1.Namespace + CancelWatches context.CancelFunc + Cluster *clusterv1.Cluster + IntervalsGetter func(spec, key string) []interface{} + SkipCleanup bool + AdditionalCleanup func() +} - Byf("Dumping all the Cluster API resources in the %q namespace", namespace.Name) +func dumpSpecResourcesAndCleanup(ctx context.Context, input cleanupInput) { + defer func() { + input.CancelWatches() + }() - // Dump all Cluster API related resources to artifacts before deleting them. - framework.DumpAllResources(ctx, framework.DumpAllResourcesInput{ - Lister: clusterProxy.GetClient(), - KubeConfigPath: clusterProxy.GetKubeconfigPath(), - ClusterctlConfigPath: clusterctlConfigPath, - Namespace: namespace.Name, - LogPath: filepath.Join(artifactFolder, "clusters", clusterProxy.GetName(), "resources"), - }) + if input.Cluster == nil { + By("Unable to dump workload cluster logs as the cluster is nil") } else { - clusterName = "empty" - clusterNamespace = "empty" + Byf("Dumping logs from the %q workload cluster", input.Cluster.Name) + input.ClusterProxy.CollectWorkloadClusterLogs(ctx, input.Cluster.Namespace, input.Cluster.Name, filepath.Join(input.ArtifactFolder, "clusters", input.Cluster.Name)) + } + + Byf("Dumping all the Cluster API resources in the %q namespace", input.Namespace.Name) + // Dump all Cluster API related resources to artifacts before deleting them. 
+ framework.DumpAllResources(ctx, framework.DumpAllResourcesInput{ + Lister: input.ClusterProxy.GetClient(), + KubeConfigPath: input.ClusterProxy.GetKubeconfigPath(), + ClusterctlConfigPath: input.ClusterctlConfigPath, + Namespace: input.Namespace.Name, + LogPath: filepath.Join(input.ArtifactFolder, "clusters", input.ClusterProxy.GetName(), "resources"), + }) + + if input.SkipCleanup { + return } - if !skipCleanup { - Byf("Deleting cluster %s/%s", clusterNamespace, clusterName) - // While https://github.com/kubernetes-sigs/cluster-api/issues/2955 is addressed in future iterations, there is a chance - // that cluster variable is not set even if the cluster exists, so we are calling DeleteAllClustersAndWait - // instead of DeleteClusterAndWait - framework.DeleteAllClustersAndWait(ctx, framework.DeleteAllClustersAndWaitInput{ - ClusterProxy: clusterProxy, - ClusterctlConfigPath: clusterctlConfigPath, - Namespace: namespace.Name, - ArtifactFolder: artifactFolder, - }, intervalsGetter(specName, "wait-delete-cluster")...) + Byf("Deleting all clusters in the %s namespace", input.Namespace.Name) + // While https://github.com/kubernetes-sigs/cluster-api/issues/2955 is addressed in future iterations, there is a chance + // that cluster variable is not set even if the cluster exists, so we are calling DeleteAllClustersAndWait + // instead of DeleteClusterAndWait + framework.DeleteAllClustersAndWait(ctx, framework.DeleteAllClustersAndWaitInput{ + ClusterProxy: input.ClusterProxy, + ClusterctlConfigPath: input.ClusterctlConfigPath, + Namespace: input.Namespace.Name, + }, input.IntervalsGetter(input.SpecName, "wait-delete-cluster")...) + + Byf("Deleting namespace used for hosting the %q test spec", input.SpecName) + framework.DeleteNamespace(ctx, framework.DeleteNamespaceInput{ + Deleter: input.ClusterProxy.GetClient(), + Name: input.Namespace.Name, + }) - Byf("Deleting namespace used for hosting the %q test spec", specName) - framework.DeleteNamespace(ctx, framework.DeleteNamespaceInput{ - Deleter: clusterProxy.GetClient(), - Name: namespace.Name, - }) + if input.AdditionalCleanup != nil { + Byf("Running additional cleanup for the %q test spec", input.SpecName) + input.AdditionalCleanup() } - cancelWatches() } diff --git a/test/e2e/config/scaleway.yaml b/test/e2e/config/scaleway.yaml index ff05771..14187ac 100644 --- a/test/e2e/config/scaleway.yaml +++ b/test/e2e/config/scaleway.yaml @@ -46,6 +46,7 @@ providers: # Following files are built using `make generate-e2e`. 
- sourcePath: "../data/infrastructure-scaleway/v1beta1/cluster-template.yaml" - sourcePath: "../data/infrastructure-scaleway/v1beta1/cluster-template-private-network.yaml" + - sourcePath: "../data/infrastructure-scaleway/v1beta1/cluster-template-managed.yaml" replacements: - old: "imagePullPolicy: Always" new: "imagePullPolicy: IfNotPresent" @@ -68,4 +69,5 @@ intervals: default/wait-control-plane: ["20m", "10s"] default/wait-worker-nodes: ["25m", "10s"] default/wait-machine-pool-nodes: ["30m", "10s"] + default/wait-worker-machine-pools: ["30m", "10s"] default/wait-delete-cluster: ["30m", "10s"] diff --git a/test/e2e/data/infrastructure-scaleway/v1beta1/cluster-template-managed/kustomization.yaml b/test/e2e/data/infrastructure-scaleway/v1beta1/cluster-template-managed/kustomization.yaml new file mode 100644 index 0000000..b8705e5 --- /dev/null +++ b/test/e2e/data/infrastructure-scaleway/v1beta1/cluster-template-managed/kustomization.yaml @@ -0,0 +1,4 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization +resources: +- ../../../../../../templates/cluster-template-managed.yaml diff --git a/test/e2e/e2e_suite_test.go b/test/e2e/e2e_suite_test.go index c12d250..fa8b3ef 100644 --- a/test/e2e/e2e_suite_test.go +++ b/test/e2e/e2e_suite_test.go @@ -155,7 +155,13 @@ func loadE2EConfig(configPath string) *clusterctl.E2EConfig { config := clusterctl.LoadE2EConfig(context.TODO(), clusterctl.LoadE2EConfigInput{ConfigPath: configPath}) Expect(config).NotTo(BeNil(), "Failed to load E2E config from %s", configPath) - Expect(config.Variables).ToNot(ContainElement("null"), "Please set the missing variables using environment variables") + nullVariables := make(map[string]string) + for k, v := range config.Variables { + if v == "null" { + nullVariables[k] = v + } + } + Expect(nullVariables).To(BeEmpty(), "Please set the missing variables using environment variables") return config } diff --git a/test/e2e/managed.go b/test/e2e/managed.go new file mode 100644 index 0000000..cf32a44 --- /dev/null +++ b/test/e2e/managed.go @@ -0,0 +1,188 @@ +package e2e + +import ( + "context" + "fmt" + "time" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + "k8s.io/klog/v2" + "sigs.k8s.io/controller-runtime/pkg/client" + + infrav1 "github.com/scaleway/cluster-api-provider-scaleway/api/v1alpha1" + clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" + "sigs.k8s.io/cluster-api/test/framework" + "sigs.k8s.io/cluster-api/test/framework/clusterctl" +) + +const ( + retryableOperationInterval = 3 * time.Second + retryableOperationTimeout = 3 * time.Minute +) + +// ApplyManagedClusterTemplateAndWaitInput is the input type for ApplyManagedClusterTemplateAndWait. +type ApplyManagedClusterTemplateAndWaitInput struct { + ClusterProxy framework.ClusterProxy + ConfigCluster clusterctl.ConfigClusterInput + WaitForClusterIntervals []interface{} + WaitForControlPlaneIntervals []interface{} + WaitForMachinePools []interface{} + Options []framework.CreateOrUpdateOption + PreWaitForCluster func() + PostMachinesProvisioned func() + WaitForControlPlaneInitialized Waiter +} + +// Waiter is a function that runs and waits for a long-running operation to finish and updates the result. +type Waiter func(ctx context.Context, input ApplyManagedClusterTemplateAndWaitInput, result *ApplyManagedClusterTemplateAndWaitResult) + +// ApplyManagedClusterTemplateAndWaitResult is the output type for ApplyClusterTemplateAndWait. 
+type ApplyManagedClusterTemplateAndWaitResult struct { + ClusterClass *clusterv1.ClusterClass + Cluster *clusterv1.Cluster + ControlPlane *infrav1.ScalewayManagedControlPlane + MachinePools []*expv1.MachinePool +} + +// ApplyManagedClusterTemplateAndWait gets a managed cluster template using clusterctl, and waits for the cluster to be ready. +// Important! this method assumes the cluster uses a ScalewayManagedControlPlane and MachinePools. +func ApplyManagedClusterTemplateAndWait(ctx context.Context, input ApplyManagedClusterTemplateAndWaitInput, result *ApplyManagedClusterTemplateAndWaitResult) { + setDefaults(&input) + Expect(ctx).NotTo(BeNil(), "ctx is required for ApplyManagedClusterTemplateAndWait") + Expect(input.ClusterProxy).ToNot(BeNil(), "Invalid argument. input.ClusterProxy can't be nil when calling ApplyManagedClusterTemplateAndWait") + Expect(result).ToNot(BeNil(), "Invalid argument. result can't be nil when calling ApplyManagedClusterTemplateAndWait") + Expect(input.ConfigCluster.Flavor).ToNot(BeEmpty(), "Invalid argument. input.ConfigCluster.Flavor can't be empty") + Expect(input.ConfigCluster.ControlPlaneMachineCount).ToNot(BeNil()) + Expect(input.ConfigCluster.WorkerMachineCount).ToNot(BeNil()) + + By(fmt.Sprintf("Creating the Scaleway managed workload cluster with name %q using the %q template (Kubernetes %s)", input.ConfigCluster.ClusterName, input.ConfigCluster.Flavor, input.ConfigCluster.KubernetesVersion)) + + By("Getting the cluster template yaml") + workloadClusterTemplate := clusterctl.ConfigCluster(ctx, clusterctl.ConfigClusterInput{ + // pass reference to the management cluster hosting this test + KubeconfigPath: input.ConfigCluster.KubeconfigPath, + // pass the clusterctl config file that points to the local provider repository created for this test, + ClusterctlConfigPath: input.ConfigCluster.ClusterctlConfigPath, + // select template + Flavor: input.ConfigCluster.Flavor, + // define template variables + Namespace: input.ConfigCluster.Namespace, + ClusterName: input.ConfigCluster.ClusterName, + KubernetesVersion: input.ConfigCluster.KubernetesVersion, + ControlPlaneMachineCount: input.ConfigCluster.ControlPlaneMachineCount, + WorkerMachineCount: input.ConfigCluster.WorkerMachineCount, + InfrastructureProvider: input.ConfigCluster.InfrastructureProvider, + // setup clusterctl logs folder + LogFolder: input.ConfigCluster.LogFolder, + ClusterctlVariables: input.ConfigCluster.ClusterctlVariables, + }) + Expect(workloadClusterTemplate).ToNot(BeNil(), "Failed to get the cluster template") + + By("Applying the cluster template yaml to the cluster") + Eventually(func() error { + return input.ClusterProxy.CreateOrUpdate(ctx, workloadClusterTemplate, input.Options...) + }, 10*time.Second).Should(Succeed(), "Failed to apply the cluster template") + + // Once we applied the cluster template we can run PreWaitForCluster. + // Note: This can e.g. be used to verify the BeforeClusterCreate lifecycle hook is executed + // and blocking correctly. + if input.PreWaitForCluster != nil { + By("Calling PreWaitForCluster") + input.PreWaitForCluster() + } + + By("Waiting for the cluster infrastructure to be provisioned") + result.Cluster = framework.DiscoveryAndWaitForCluster(ctx, framework.DiscoveryAndWaitForClusterInput{ + Getter: input.ClusterProxy.GetClient(), + Namespace: input.ConfigCluster.Namespace, + Name: input.ConfigCluster.ClusterName, + }, input.WaitForClusterIntervals...) 
+ + By("Waiting for managed control plane to be initialized") + input.WaitForControlPlaneInitialized(ctx, input, result) + + By("Waiting for the machine pools to be provisioned") + result.MachinePools = framework.DiscoveryAndWaitForMachinePools(ctx, framework.DiscoveryAndWaitForMachinePoolsInput{ + Getter: input.ClusterProxy.GetClient(), + Lister: input.ClusterProxy.GetClient(), + Cluster: result.Cluster, + }, input.WaitForMachinePools...) + + if input.PostMachinesProvisioned != nil { + By("Calling PostMachinesProvisioned") + input.PostMachinesProvisioned() + } +} + +type ManagedControlPlaneResult struct { + clusterctl.ApplyClusterTemplateAndWaitResult + + ManagedControlPlane *infrav1.ScalewayManagedControlPlane +} + +// DiscoveryAndWaitFoManagedControlPlaneInitializedInput is the input type for DiscoveryAndWaitForManagedControlPlaneInitialized. +type DiscoveryAndWaitForManagedControlPlaneInitializedInput struct { + Lister framework.Lister + Cluster *clusterv1.Cluster +} + +// DiscoveryAndWaitForManagedControlPlaneInitialized discovers the KubeadmControlPlane object attached to a cluster and waits for it to be initialized. +func DiscoveryAndWaitForManagedControlPlaneInitialized(ctx context.Context, input DiscoveryAndWaitForManagedControlPlaneInitializedInput, intervals ...interface{}) *infrav1.ScalewayManagedControlPlane { + Expect(ctx).NotTo(BeNil(), "ctx is required for DiscoveryAndWaitForManagedControlPlaneInitialized") + Expect(input.Lister).ToNot(BeNil(), "Invalid argument. input.Lister can't be nil when calling DiscoveryAndWaitForManagedControlPlaneInitialized") + Expect(input.Cluster).ToNot(BeNil(), "Invalid argument. input.Cluster can't be nil when calling DiscoveryAndWaitForManagedControlPlaneInitialized") + + By("Getting ScalewayManagedControlPlane control plane") + + var controlPlane *infrav1.ScalewayManagedControlPlane + Eventually(func(g Gomega) { + controlPlane = GetManagedControlPlaneByCluster(ctx, GetManagedControlPlaneByClusterInput{ + Lister: input.Lister, + ClusterName: input.Cluster.Name, + Namespace: input.Cluster.Namespace, + }) + g.Expect(controlPlane).ToNot(BeNil()) + }, "10s", "1s").Should(Succeed(), "Couldn't get the control plane for the cluster %s", klog.KObj(input.Cluster)) + + return controlPlane +} + +// GetManagedontrolPlaneByClusterInput is the input for GetManagedControlPlaneByCluster. +type GetManagedControlPlaneByClusterInput struct { + Lister framework.Lister + ClusterName string + Namespace string +} + +// GetManagedControlPlaneByCluster returns the ScalewayManagedControlPlane objects for a cluster. +func GetManagedControlPlaneByCluster(ctx context.Context, input GetManagedControlPlaneByClusterInput) *infrav1.ScalewayManagedControlPlane { + opts := []client.ListOption{ + client.InNamespace(input.Namespace), + client.MatchingLabels{ + clusterv1.ClusterNameLabel: input.ClusterName, + }, + } + + controlPlaneList := &infrav1.ScalewayManagedControlPlaneList{} + Eventually(func() error { + return input.Lister.List(ctx, controlPlaneList, opts...) 
+ }, retryableOperationTimeout, retryableOperationInterval).Should(Succeed(), "Failed to list ScalewayManagedControlPlane object for Cluster %s", klog.KRef(input.Namespace, input.ClusterName)) + Expect(len(controlPlaneList.Items)).ToNot(BeNumerically(">", 1), "Cluster %s should not have more than 1 ScalewayManagedControlPlane object", klog.KRef(input.Namespace, input.ClusterName)) + if len(controlPlaneList.Items) == 1 { + return &controlPlaneList.Items[0] + } + return nil +} + +func setDefaults(input *ApplyManagedClusterTemplateAndWaitInput) { + if input.WaitForControlPlaneInitialized == nil { + input.WaitForControlPlaneInitialized = func(ctx context.Context, input ApplyManagedClusterTemplateAndWaitInput, result *ApplyManagedClusterTemplateAndWaitResult) { + result.ControlPlane = DiscoveryAndWaitForManagedControlPlaneInitialized(ctx, DiscoveryAndWaitForManagedControlPlaneInitializedInput{ + Lister: input.ClusterProxy.GetClient(), + Cluster: result.Cluster, + }, input.WaitForControlPlaneIntervals...) + } + } +} diff --git a/test/e2e/scaleway_managed_test.go b/test/e2e/scaleway_managed_test.go new file mode 100644 index 0000000..ca94ab3 --- /dev/null +++ b/test/e2e/scaleway_managed_test.go @@ -0,0 +1,110 @@ +package e2e + +import ( + "context" + "fmt" + "os" + "path/filepath" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + "k8s.io/utils/ptr" + capi_e2e "sigs.k8s.io/cluster-api/test/e2e" + "sigs.k8s.io/cluster-api/test/framework" + "sigs.k8s.io/cluster-api/test/framework/clusterctl" + "sigs.k8s.io/cluster-api/util" +) + +var _ = Describe("Managed workload cluster creation", func() { + var ( + ctx = context.TODO() + specName = "managed" // TODO: set to "create-managed-workload-cluster" when tag issue is fixed. + namespace *corev1.Namespace + cancelWatches context.CancelFunc + result *ApplyManagedClusterTemplateAndWaitResult + clusterName string + clusterctlLogFolder string + ) + + BeforeEach(func() { + Expect(e2eConfig).ToNot(BeNil(), "Invalid argument. e2eConfig can't be nil when calling %s spec", specName) + Expect(clusterctlConfigPath).To(BeAnExistingFile(), "Invalid argument. clusterctlConfigPath must be an existing file when calling %s spec", specName) + Expect(bootstrapClusterProxy).ToNot(BeNil(), "Invalid argument. bootstrapClusterProxy can't be nil when calling %s spec", specName) + Expect(os.MkdirAll(artifactFolder, 0o755)).To(Succeed(), "Invalid argument. artifactFolder can't be created for %s spec", specName) + Expect(e2eConfig.Variables).To(HaveKey(capi_e2e.KubernetesVersion)) + + clusterName = fmt.Sprintf("caps-e2e-%s", util.RandomString(6)) + + // Setup a Namespace where to host objects for this spec and create a watcher for the namespace events. + namespace, cancelWatches = setupSpecNamespace(ctx, specName, bootstrapClusterProxy, artifactFolder) + + result = new(ApplyManagedClusterTemplateAndWaitResult) + + // We need to override clusterctl apply log folder to avoid getting our credentials exposed. 
+ clusterctlLogFolder = filepath.Join(os.TempDir(), "clusters", bootstrapClusterProxy.GetName()) + }) + + AfterEach(func() { + cleanInput := cleanupInput{ + SpecName: specName, + Cluster: result.Cluster, + ClusterProxy: bootstrapClusterProxy, + ClusterctlConfigPath: clusterctlConfigPath, + Namespace: namespace, + CancelWatches: cancelWatches, + IntervalsGetter: e2eConfig.GetIntervals, + SkipCleanup: skipCleanup, + ArtifactFolder: artifactFolder, + } + + dumpSpecResourcesAndCleanup(ctx, cleanInput) + }) + + Context("Creating a Scaleway Kapsule cluster", func() { + It("Should create a cluster with 1 machine pool and scale", func() { + By("Initializes with 1 machine pool") + + ApplyManagedClusterTemplateAndWait(ctx, ApplyManagedClusterTemplateAndWaitInput{ + ClusterProxy: bootstrapClusterProxy, + ConfigCluster: clusterctl.ConfigClusterInput{ + LogFolder: clusterctlLogFolder, + ClusterctlConfigPath: clusterctlConfigPath, + KubeconfigPath: bootstrapClusterProxy.GetKubeconfigPath(), + InfrastructureProvider: clusterctl.DefaultInfrastructureProvider, + Flavor: "managed", + Namespace: namespace.Name, + ClusterName: clusterName, + KubernetesVersion: e2eConfig.MustGetVariable(capi_e2e.KubernetesVersion), + ControlPlaneMachineCount: ptr.To[int64](1), + WorkerMachineCount: ptr.To[int64](3), + ClusterctlVariables: map[string]string{ + "WORKER_PUBLIC_IP_DISABLED": "true", + "PUBLIC_GATEWAYS": "[{}]", + }, + }, + WaitForClusterIntervals: e2eConfig.GetIntervals(specName, "wait-cluster"), + WaitForControlPlaneIntervals: e2eConfig.GetIntervals(specName, "wait-control-plane"), + WaitForMachinePools: e2eConfig.GetIntervals(specName, "wait-worker-machine-pools"), + }, result) + + By("Scaling the machine pool up") + framework.ScaleMachinePoolAndWait(ctx, framework.ScaleMachinePoolAndWaitInput{ + ClusterProxy: bootstrapClusterProxy, + Cluster: result.Cluster, + Replicas: 4, + MachinePools: result.MachinePools, + WaitForMachinePoolToScale: e2eConfig.GetIntervals(specName, "wait-worker-machine-pools"), + }) + + By("Scaling the machine pool down") + framework.ScaleMachinePoolAndWait(ctx, framework.ScaleMachinePoolAndWaitInput{ + ClusterProxy: bootstrapClusterProxy, + Cluster: result.Cluster, + Replicas: 3, + MachinePools: result.MachinePools, + WaitForMachinePoolToScale: e2eConfig.GetIntervals(specName, "wait-worker-machine-pools"), + }) + }) + }) +})
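Note on the Scope refactor in internal/service/scaleway/vpcgw: the service now depends on a narrow, locally declared Scope interface (scope.Interface plus HasPrivateNetwork, PrivateNetworkID and PublicGateways) instead of the concrete *scope.Cluster, and reaches the Scaleway API through Cloud()/SetCloud(). The minimal, self-contained Go sketch below illustrates that consumer-defined-interface pattern in isolation; every name in it (gatewayScope, gatewayService, fakeScope, PublicGatewayCount) is invented for illustration and is not part of this change.

package main

import "fmt"

// gatewayScope is a consumer-defined interface: the service declares only the
// accessors it actually calls, so a cluster scope, a managed-cluster scope, or
// a test fake can all drive it.
type gatewayScope interface {
	HasPrivateNetwork() bool
	PublicGatewayCount() int
}

// gatewayService consumes the interface rather than a concrete scope struct,
// mirroring how vpcgw.Service now embeds its local Scope interface.
type gatewayService struct{ scope gatewayScope }

func (s gatewayService) reconcile() string {
	if !s.scope.HasPrivateNetwork() {
		return "nothing to do: no Private Network"
	}
	return fmt.Sprintf("ensuring %d gateway(s)", s.scope.PublicGatewayCount())
}

// fakeScope is a trivial stand-in for a scope implementation in tests.
type fakeScope struct {
	privateNetwork bool
	gateways       int
}

func (f fakeScope) HasPrivateNetwork() bool { return f.privateNetwork }
func (f fakeScope) PublicGatewayCount() int { return f.gateways }

func main() {
	svc := gatewayService{scope: fakeScope{privateNetwork: true, gateways: 2}}
	fmt.Println(svc.reconcile()) // prints: ensuring 2 gateway(s)
}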
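Note on the managed e2e helper: setDefaults installs a default WaitForControlPlaneInitialized waiter that calls DiscoveryAndWaitForManagedControlPlaneInitialized, and a spec can replace it with its own Waiter. The sketch below shows such an override, assuming it sits inside a spec in package e2e next to managed.go; ctx, result, configCluster, specName, bootstrapClusterProxy and e2eConfig stand in for values prepared as in scaleway_managed_test.go, and the extra Expect is only an illustrative assertion.

// Sketch (package e2e): override the default control-plane waiter with a
// custom Waiter, then run ApplyManagedClusterTemplateAndWait as usual.
input := ApplyManagedClusterTemplateAndWaitInput{
	ClusterProxy:                 bootstrapClusterProxy,
	ConfigCluster:                configCluster, // a clusterctl.ConfigClusterInput built as in the "managed" spec
	WaitForClusterIntervals:      e2eConfig.GetIntervals(specName, "wait-cluster"),
	WaitForControlPlaneIntervals: e2eConfig.GetIntervals(specName, "wait-control-plane"),
	WaitForMachinePools:          e2eConfig.GetIntervals(specName, "wait-worker-machine-pools"),
	WaitForControlPlaneInitialized: func(ctx context.Context, in ApplyManagedClusterTemplateAndWaitInput, res *ApplyManagedClusterTemplateAndWaitResult) {
		// Reuse the discovery helper from managed.go, then add a custom check.
		res.ControlPlane = DiscoveryAndWaitForManagedControlPlaneInitialized(ctx, DiscoveryAndWaitForManagedControlPlaneInitializedInput{
			Lister:  in.ClusterProxy.GetClient(),
			Cluster: res.Cluster,
		}, in.WaitForControlPlaneIntervals...)
		Expect(res.ControlPlane).ToNot(BeNil()) // illustrative extra assertion
	},
}
ApplyManagedClusterTemplateAndWait(ctx, input, result)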