diff --git a/api/v1beta1/zz_generated.conversion.go b/api/v1beta1/zz_generated.conversion.go index 752c8540b..de44b01bf 100644 --- a/api/v1beta1/zz_generated.conversion.go +++ b/api/v1beta1/zz_generated.conversion.go @@ -1444,6 +1444,7 @@ func autoConvert_v1beta2_IBMVPCMachineStatus_To_v1beta1_IBMVPCMachineStatus(in * // WARNING: in.FailureMessage requires manual conversion: does not exist in peer-type out.InstanceStatus = in.InstanceStatus // WARNING: in.LoadBalancerPoolMembers requires manual conversion: does not exist in peer-type + // WARNING: in.V1Beta2 requires manual conversion: does not exist in peer-type return nil } diff --git a/api/v1beta2/conditions_consts.go b/api/v1beta2/conditions_consts.go index 964daa0a2..a3748d535 100644 --- a/api/v1beta2/conditions_consts.go +++ b/api/v1beta2/conditions_consts.go @@ -37,6 +37,35 @@ const ( IBMPowerVSMachineReadyUnknownV1Beta2Reason = clusterv1beta1.ReadyUnknownV1Beta2Reason ) +const ( + // IBMVPCMachineReadyV1Beta2Condition is true if the IBMVPCMachine's deletionTimestamp is not set and the IBMVPCMachine's + // IBMVPCMachineInstanceReadyV1Beta2Condition is true. + IBMVPCMachineReadyV1Beta2Condition = clusterv1beta1.ReadyV1Beta2Condition + + // IBMVPCMachineReadyV1Beta2Reason surfaces when the IBMVPCMachine readiness criteria are met. + IBMVPCMachineReadyV1Beta2Reason = clusterv1beta1.ReadyV1Beta2Reason + + // IBMVPCMachineNotReadyV1Beta2Reason surfaces when the IBMVPCMachine readiness criteria are not met. + IBMVPCMachineNotReadyV1Beta2Reason = clusterv1beta1.NotReadyV1Beta2Reason + + // IBMVPCMachineReadyUnknownV1Beta2Reason surfaces when at least one IBMVPCMachine readiness criterion is unknown. + IBMVPCMachineReadyUnknownV1Beta2Reason = clusterv1beta1.ReadyUnknownV1Beta2Reason +) + +// IBMVPCMachine's InstanceReady condition and corresponding reasons that will be used in the v1Beta2 API version. +const ( + // IBMVPCMachineInstanceReadyV1Beta2Condition documents the status of the instance that is controlled + // by the IBMVPCMachine. + IBMVPCMachineInstanceReadyV1Beta2Condition = "InstanceReady" + + // IBMVPCMachineInstanceReadyV1Beta2Reason surfaces when the instance that is controlled + // by the IBMVPCMachine is ready. + IBMVPCMachineInstanceReadyV1Beta2Reason = "InstanceReady" + + // IBMVPCMachineInstanceNotReadyV1Beta2Reason surfaces when the instance that is controlled + // by the IBMVPCMachine is not ready. + IBMVPCMachineInstanceNotReadyV1Beta2Reason = "InstanceNotReady" +) const ( // IBMPowerVSMachineInstanceReadyV1Beta2Condition documents the status of the instance that is controlled // by the IBMPowerVSMachine. @@ -95,6 +124,9 @@ const ( // InstanceNotReadyReason used when the instance is in a not ready state. InstanceNotReadyReason = "InstanceNotReady" + // InstanceDeletingReason is used when the instance is in a deleting state. + InstanceDeletingReason = "InstanceDeleting" + // InstanceStateUnknownReason used when the instance is in a unknown state. InstanceStateUnknownReason = "InstanceStateUnknown" ) diff --git a/api/v1beta2/ibmvpcmachine_types.go b/api/v1beta2/ibmvpcmachine_types.go index d726128fe..065ed282d 100644 --- a/api/v1beta2/ibmvpcmachine_types.go +++ b/api/v1beta2/ibmvpcmachine_types.go @@ -175,6 +175,21 @@ type IBMVPCMachineStatus struct { // LoadBalancerPoolMembers is the status of IBM Cloud VPC Load Balancer Backend Pools the machine is a member.
// +optional LoadBalancerPoolMembers []VPCLoadBalancerBackendPoolMember `json:"loadBalancerPoolMembers,omitempty"` + + // V1Beta2 groups all the fields that will be added or modified in IBMVPCMachine's status with the V1Beta2 version. + // +optional + V1Beta2 *IBMVPCMachineV1Beta2Status `json:"v1beta2,omitempty"` +} + +// IBMVPCMachineV1Beta2Status groups all the fields that will be added or modified in IBMVPCMachineStatus with the V1Beta2 version. +// See https://github.com/kubernetes-sigs/cluster-api/blob/main/docs/proposals/20240916-improve-status-in-CAPI-resources.md for more context. +type IBMVPCMachineV1Beta2Status struct { + // Conditions represents the observations of an IBMVPCMachine's current state. + // +optional + // +listType=map + // +listMapKey=type + // +kubebuilder:validation:MaxItems=32 + Conditions []metav1.Condition `json:"conditions,omitempty"` } // +kubebuilder:object:root=true @@ -211,6 +226,22 @@ type IBMVPCMachineList struct { Items []IBMVPCMachine `json:"items"` } +// GetV1Beta2Conditions returns the set of conditions for the IBMVPCMachine object. +func (r *IBMVPCMachine) GetV1Beta2Conditions() []metav1.Condition { + if r.Status.V1Beta2 == nil { + return nil + } + return r.Status.V1Beta2.Conditions +} + +// SetV1Beta2Conditions sets conditions for the IBMVPCMachine object. +func (r *IBMVPCMachine) SetV1Beta2Conditions(conditions []metav1.Condition) { + if r.Status.V1Beta2 == nil { + r.Status.V1Beta2 = &IBMVPCMachineV1Beta2Status{} + } + r.Status.V1Beta2.Conditions = conditions +} + func init() { objectTypes = append(objectTypes, &IBMVPCMachine{}, &IBMVPCMachineList{}) } diff --git a/api/v1beta2/zz_generated.deepcopy.go b/api/v1beta2/zz_generated.deepcopy.go index 2bbd43f5e..ed48d4d00 100644 --- a/api/v1beta2/zz_generated.deepcopy.go +++ b/api/v1beta2/zz_generated.deepcopy.go @@ -1318,6 +1318,11 @@ func (in *IBMVPCMachineStatus) DeepCopyInto(out *IBMVPCMachineStatus) { (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.V1Beta2 != nil { + in, out := &in.V1Beta2, &out.V1Beta2 + *out = new(IBMVPCMachineV1Beta2Status) + (*in).DeepCopyInto(*out) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IBMVPCMachineStatus. @@ -1443,6 +1448,28 @@ func (in *IBMVPCMachineTemplateStatus) DeepCopy() *IBMVPCMachineTemplateStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IBMVPCMachineV1Beta2Status) DeepCopyInto(out *IBMVPCMachineV1Beta2Status) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IBMVPCMachineV1Beta2Status. +func (in *IBMVPCMachineV1Beta2Status) DeepCopy() *IBMVPCMachineV1Beta2Status { + if in == nil { + return nil + } + out := new(IBMVPCMachineV1Beta2Status) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *IBMVPCResourceReference) DeepCopyInto(out *IBMVPCResourceReference) { *out = *in diff --git a/cloud/scope/machine.go b/cloud/scope/machine.go index eb943d42d..85da6b134 100644 --- a/cloud/scope/machine.go +++ b/cloud/scope/machine.go @@ -33,6 +33,7 @@ import ( "k8s.io/klog/v2" "k8s.io/utils/ptr" + ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" @@ -62,7 +63,6 @@ type MachineScopeParams struct { // MachineScope defines a scope defined around a machine and its cluster. type MachineScope struct { - logr.Logger Client client.Client patchHelper *v1beta1patch.Helper @@ -127,7 +127,6 @@ func NewMachineScope(params MachineScopeParams) (*MachineScope, error) { } return &MachineScope{ - Logger: params.Logger, Client: params.Client, IBMVPCClient: vpcClient, GlobalTaggingClient: globalTaggingClient, @@ -140,7 +139,8 @@ func NewMachineScope(params MachineScopeParams) (*MachineScope, error) { } // CreateMachine creates a vpc machine. -func (m *MachineScope) CreateMachine() (*vpcv1.Instance, error) { //nolint: gocyclo +func (m *MachineScope) CreateMachine(ctx context.Context) (*vpcv1.Instance, error) { //nolint: gocyclo + log := ctrl.LoggerFrom(ctx) instanceReply, err := m.ensureInstanceUnique(m.IBMVPCMachine.Name) if err != nil { return nil, err @@ -270,7 +270,7 @@ func (m *MachineScope) CreateMachine() (*vpcv1.Instance, error) { //nolint: gocy // Populate Placement target details, if provided. var placementTarget vpcv1.InstancePlacementTargetPrototypeIntf if m.IBMVPCMachine.Spec.PlacementTarget != nil { - placementTarget, err = m.configurePlacementTarget() + placementTarget, err = m.configurePlacementTarget(ctx) if err != nil { return nil, fmt.Errorf("error configuration machine placement target: %w", err) } @@ -280,7 +280,7 @@ func (m *MachineScope) CreateMachine() (*vpcv1.Instance, error) { //nolint: gocy sshKeys := make([]vpcv1.KeyIdentityIntf, 0) if m.IBMVPCMachine.Spec.SSHKeys != nil { for _, sshKey := range m.IBMVPCMachine.Spec.SSHKeys { - keyID, err := fetchKeyID(sshKey, m) + keyID, err := fetchKeyID(ctx, sshKey, m) if err != nil { return nil, fmt.Errorf("error while fetching SSHKey: %v error: %v", sshKey, err) } @@ -294,7 +294,7 @@ func (m *MachineScope) CreateMachine() (*vpcv1.Instance, error) { //nolint: gocy // Populate boot volume attachment, if provided. var bootVolumeAttachment *vpcv1.VolumeAttachmentPrototypeInstanceByImageContext if m.IBMVPCMachine.Spec.BootVolume != nil { - bootVolumeAttachment = m.volumeToVPCVolumeAttachment(m.IBMVPCMachine.Spec.BootVolume) + bootVolumeAttachment = m.volumeToVPCVolumeAttachment(ctx, m.IBMVPCMachine.Spec.BootVolume) } // Configure the Machine's Image or CatalogOffering based on provided fields. 
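The thread running through these machine.go hunks is the logging migration: MachineScope no longer carries a logr.Logger, so each helper recovers the request-scoped logger from the context that the reconciler enriched. A minimal sketch of the pattern, under illustrative names (reconcileExample and helperExample are not part of this change):

```go
package main

import (
	"context"

	"k8s.io/klog/v2"
	ctrl "sigs.k8s.io/controller-runtime"
)

// reconcileExample mirrors the reconciler side: enrich the context logger
// once with reconcile-scoped key/value pairs, then store it back into ctx.
func reconcileExample(ctx context.Context) {
	log := ctrl.LoggerFrom(ctx)
	log = log.WithValues("Cluster", klog.KRef("default", "example-cluster"))
	ctx = ctrl.LoggerInto(ctx, log)
	helperExample(ctx)
}

// helperExample mirrors the scope-helper side: no stored logger field,
// just ctrl.LoggerFrom at the top of the function.
func helperExample(ctx context.Context) {
	log := ctrl.LoggerFrom(ctx)
	// Keys like "machineName" become redundant once the reconciler has
	// already attached the object reference to the logger.
	log.Info("Machine creation configured with existing image", "imageID", "r006-example")
}

func main() {
	reconcileExample(context.Background())
}
```

Because the enriched logger already carries the object's identity, the per-call "machineName" key/value pairs can be dropped, which is why several of the rewritten log lines in this file get shorter.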
@@ -309,7 +309,7 @@ func (m *MachineScope) CreateMachine() (*vpcv1.Instance, error) { //nolint: gocy VPC: vpcIdentity, Zone: zone, } - imageID, err := fetchImageID(m.IBMVPCMachine.Spec.Image, m) + imageID, err := fetchImageID(ctx, m.IBMVPCMachine.Spec.Image, m) if err != nil { record.Warnf(m.IBMVPCMachine, "FailedRetrieveImage", "Failed image retrieval - %w", err) return nil, fmt.Errorf("error while fetching image ID: %w", err) @@ -329,7 +329,7 @@ func (m *MachineScope) CreateMachine() (*vpcv1.Instance, error) { //nolint: gocy imageInstancePrototype.BootVolumeAttachment = bootVolumeAttachment } - m.Logger.Info("machine creation configured with existing image", "machineName", m.IBMVPCMachine.Name, "imageID", *imageID) + log.Info("Machine creation configured with existing image", "imageID", *imageID) options.SetInstancePrototype(imageInstancePrototype) } else if m.IBMVPCMachine.Spec.CatalogOffering != nil { catalogInstancePrototype := &vpcv1.InstancePrototypeInstanceByCatalogOffering{ @@ -347,13 +347,13 @@ func (m *MachineScope) CreateMachine() (*vpcv1.Instance, error) { //nolint: gocy catalogOfferingPrototype.Offering = &vpcv1.CatalogOfferingIdentityCatalogOfferingByCRN{ CRN: m.IBMVPCMachine.Spec.CatalogOffering.OfferingCRN, } - m.Logger.Info("machine creation configured with catalog offering", "machineName", m.IBMVPCMachine.Name, "offeringCRN", *m.IBMVPCMachine.Spec.CatalogOffering.OfferingCRN) + log.Info("Machine creation configured with catalog offering", "offeringCRN", *m.IBMVPCMachine.Spec.CatalogOffering.OfferingCRN) } else if m.IBMVPCMachine.Spec.CatalogOffering.VersionCRN != nil { // TODO(cjschaef): Perform lookup or use webhook validation to confirm Catalog Offering Version CRN. catalogOfferingPrototype.Version = &vpcv1.CatalogOfferingVersionIdentityCatalogOfferingVersionByCRN{ CRN: m.IBMVPCMachine.Spec.CatalogOffering.VersionCRN, } - m.Logger.Info("machine creation configured with catalog version", "machineName", m.IBMVPCMachine.Name, "versionCRN", *m.IBMVPCMachine.Spec.CatalogOffering.VersionCRN) + log.Info("Machine creation configured with catalog version", "versionCRN", *m.IBMVPCMachine.Spec.CatalogOffering.VersionCRN) } else { // TODO(cjschaef): Look to add webhook validation to ensure one is provided. return nil, fmt.Errorf("error catalog offering missing offering crn and version crn, one must be provided") @@ -363,7 +363,7 @@ func (m *MachineScope) CreateMachine() (*vpcv1.Instance, error) { //nolint: gocy catalogOfferingPrototype.Plan = &vpcv1.CatalogOfferingVersionPlanIdentityCatalogOfferingVersionPlanByCRN{ CRN: m.IBMVPCMachine.Spec.CatalogOffering.PlanCRN, } - m.Logger.Info("machine creation configured with catalog plan", "machineName", m.IBMVPCMachine.Name, "planCRN", *m.IBMVPCMachine.Spec.CatalogOffering.PlanCRN) + log.Info("Machine creation configured with catalog plan", "planCRN", *m.IBMVPCMachine.Spec.CatalogOffering.PlanCRN) } // Configure additional fields if they were populated. 
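Dropping the stored logger also reshapes error paths: in the fetchKeyID and fetchImageID hunks further down, code that previously called m.Logger.Error(err, ...) and returned the bare error now returns a single wrapped error and leaves logging to the caller. A minimal sketch of that pattern, under illustrative names (errAPI, listKeys, and fetchKey are not part of this change):

```go
package main

import (
	"errors"
	"fmt"
)

// errAPI stands in for an error returned by the VPC SDK.
var errAPI = errors.New("status code 500")

func listKeys() error { return errAPI }

func fetchKey() error {
	if err := listKeys(); err != nil {
		// Wrap with %w instead of logging here; the reconciler reports
		// the failure once, and callers keep the full cause chain.
		return fmt.Errorf("failed to get keys: %w", err)
	}
	return nil
}

func main() {
	err := fetchKey()
	fmt.Println(errors.Is(err, errAPI)) // true: the chain is preserved
}
```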
@@ -384,7 +384,7 @@ func (m *MachineScope) CreateMachine() (*vpcv1.Instance, error) { //nolint: gocy return nil, fmt.Errorf("error no machine image or catalog offering provided to build: %s", m.IBMVPCMachine.Spec.Name) } - m.Logger.Info("creating instance", "createOptions", options, "name", m.IBMVPCMachine.Name, "profile", *profile.Name, "resourceGroup", resourceGroupIdentity, "vpc", vpcIdentity, "zone", zone) + log.Info("Creating instance", "createOptions", options, "name", m.IBMVPCMachine.Name, "profile", *profile.Name, "resourceGroup", resourceGroupIdentity, "vpc", vpcIdentity, "zone", zone) instance, _, err := m.IBMVPCClient.CreateInstance(options) if err != nil { record.Warnf(m.IBMVPCMachine, "FailedCreateInstance", "Failed instance creation - %s, %v", options, err) @@ -395,7 +395,8 @@ func (m *MachineScope) CreateMachine() (*vpcv1.Instance, error) { //nolint: gocy } // configurePlacementTarget will configure a Machine's Placement Target based on the Machine's provided configuration, if supplied. -func (m *MachineScope) configurePlacementTarget() (vpcv1.InstancePlacementTargetPrototypeIntf, error) { +func (m *MachineScope) configurePlacementTarget(ctx context.Context) (vpcv1.InstancePlacementTargetPrototypeIntf, error) { + log := ctrl.LoggerFrom(ctx) // TODO(cjschaef): We currently don't support the other placement target options (Dedicated Host Group, Placement Group), they need to be added. if m.IBMVPCMachine.Spec.PlacementTarget.DedicatedHost != nil { // Lookup Dedicated Host ID by Name if it was provided. @@ -412,7 +413,7 @@ func (m *MachineScope) configurePlacementTarget() (vpcv1.InstancePlacementTarget dedicatedHostID = dHost.ID } - m.Logger.Info("machine creation configured with dedicated host placement", "machineName", m.IBMVPCMachine.Name, "dedicatedHostID", *dedicatedHostID) + log.Info("Machine creation configured with dedicated host placement", "dedicatedHostID", *dedicatedHostID) return &vpcv1.InstancePlacementTargetPrototypeDedicatedHostGroupIdentityDedicatedHostGroupIdentityByID{ ID: dedicatedHostID, }, nil @@ -420,7 +421,8 @@ func (m *MachineScope) configurePlacementTarget() (vpcv1.InstancePlacementTarget return nil, nil } -func (m *MachineScope) volumeToVPCVolumeAttachment(volume *infrav1.VPCVolume) *vpcv1.VolumeAttachmentPrototypeInstanceByImageContext { +func (m *MachineScope) volumeToVPCVolumeAttachment(ctx context.Context, volume *infrav1.VPCVolume) *vpcv1.VolumeAttachmentPrototypeInstanceByImageContext { + log := ctrl.LoggerFrom(ctx) bootVolume := &vpcv1.VolumeAttachmentPrototypeInstanceByImageContext{ DeleteVolumeOnInstanceDelete: core.BoolPtr(volume.DeleteVolumeOnInstanceDelete), Volume: &vpcv1.VolumePrototypeInstanceByImageContext{}, @@ -448,7 +450,7 @@ func (m *MachineScope) volumeToVPCVolumeAttachment(volume *infrav1.VPCVolume) *v bootVolume.Volume.EncryptionKey = &vpcv1.EncryptionKeyIdentity{ CRN: core.StringPtr(volume.EncryptionKeyCRN), } - m.Logger.Info("machine creation configured with volumn encryption key", "machineName", m.IBMVPCMachine.Name, "encryptionKeyCRN", volume.EncryptionKeyCRN) + log.Info("Machine creation configured with volume encryption key", "encryptionKeyCRN", volume.EncryptionKeyCRN) } return bootVolume @@ -545,7 +547,7 @@ func (m *MachineScope) getLoadBalancerPoolID(pool *infrav1.VPCResource, loadBala } // ReconcileVPCLoadBalancerPoolMember reconciles a Machine's Load Balancer Pool membership.
-func (m *MachineScope) ReconcileVPCLoadBalancerPoolMember(poolMember infrav1.VPCLoadBalancerBackendPoolMember) (bool, error) { +func (m *MachineScope) ReconcileVPCLoadBalancerPoolMember(ctx context.Context, poolMember infrav1.VPCLoadBalancerBackendPoolMember) (bool, error) { // Collect the Machine's internal IP. internalIP := m.GetMachineInternalIP() if internalIP == nil { @@ -554,7 +556,7 @@ func (m *MachineScope) ReconcileVPCLoadBalancerPoolMember(poolMember infrav1.VPC } // Check if Instance is already a member of Load Balancer Backend Pool. - existingMember, err := m.checkVPCLoadBalancerPoolMemberExists(poolMember, internalIP) + existingMember, err := m.checkVPCLoadBalancerPoolMemberExists(ctx, poolMember, internalIP) if err != nil { return false, fmt.Errorf("error failed to check if member exists in pool") } else if existingMember != nil { @@ -567,11 +569,12 @@ func (m *MachineScope) ReconcileVPCLoadBalancerPoolMember(poolMember infrav1.VPC } // Otherwise, create VPC Load Balancer Backend Pool Member - return m.createVPCLoadBalancerPoolMember(poolMember, internalIP) + return m.createVPCLoadBalancerPoolMember(ctx, poolMember, internalIP) } // checkVPCLoadBalancerPoolMemberExists determines whether a Machine's Load Balancer Pool membership already exists. -func (m *MachineScope) checkVPCLoadBalancerPoolMemberExists(poolMember infrav1.VPCLoadBalancerBackendPoolMember, internalIP *string) (*vpcv1.LoadBalancerPoolMember, error) { +func (m *MachineScope) checkVPCLoadBalancerPoolMemberExists(ctx context.Context, poolMember infrav1.VPCLoadBalancerBackendPoolMember, internalIP *string) (*vpcv1.LoadBalancerPoolMember, error) { + log := ctrl.LoggerFrom(ctx) loadBalancerID, err := m.getLoadBalancerID(&poolMember.LoadBalancer) if err != nil { return nil, fmt.Errorf("error checking if load balancer pool member exists: %w", err) @@ -600,7 +603,7 @@ func (m *MachineScope) checkVPCLoadBalancerPoolMemberExists(poolMember infrav1.V if target, ok := member.Target.(*vpcv1.LoadBalancerPoolMemberTarget); ok { // Verify the target address matches the Machine's internal IP. if *target.Address == *internalIP { - m.Logger.Info("found existing load balancer pool member for machine", "machineName", m.IBMVPCMachine.Spec.Name, "internalIP", *internalIP, "poolID", *poolID, "loadBalancerID", *loadBalancerID) + log.Info("Found existing load balancer pool member for machine", "internalIP", *internalIP, "poolID", *poolID, "loadBalancerID", *loadBalancerID) return ptr.To(member), nil } } @@ -611,7 +614,8 @@ func (m *MachineScope) checkVPCLoadBalancerPoolMemberExists(poolMember infrav1.V } // createVPCLoadBalancerPoolMember will create a new member within a Load Balancer Pool for the Machine's internal IP. -func (m *MachineScope) createVPCLoadBalancerPoolMember(poolMember infrav1.VPCLoadBalancerBackendPoolMember, internalIP *string) (bool, error) { +func (m *MachineScope) createVPCLoadBalancerPoolMember(ctx context.Context, poolMember infrav1.VPCLoadBalancerBackendPoolMember, internalIP *string) (bool, error) { + log := ctrl.LoggerFrom(ctx) // Retrieve the Load Balancer ID. 
loadBalancerID, err := m.getLoadBalancerID(&poolMember.LoadBalancer) if err != nil { @@ -644,7 +648,7 @@ func (m *MachineScope) createVPCLoadBalancerPoolMember(poolMember infrav1.VPCLoa if err != nil { return false, fmt.Errorf("error failed creating load balancer backend pool member: %w", err) } - m.Logger.Info("created load balancer backend pool member", "instanceID", m.IBMVPCMachine.Status.InstanceID, "loadBalancerID", loadBalancerID, "loadBalancerBackendPoolID", loadBalancerBackendPoolID, "port", poolMember.Port, "loadBalancerBackendPoolMemberID", loadBalancerPoolMember.ID) + log.Info("Created load balancer backend pool member", "instanceID", m.IBMVPCMachine.Status.InstanceID, "loadBalancerID", loadBalancerID, "loadBalancerBackendPoolID", loadBalancerBackendPoolID, "port", poolMember.Port, "loadBalancerBackendPoolMemberID", loadBalancerPoolMember.ID) // Add the new pool member details to the Machine Status. // To prevent additional API calls, only use ID's and not Name's, as reconciliation does not rely on Name's for these resources in Status. @@ -670,7 +674,8 @@ func (m *MachineScope) createVPCLoadBalancerPoolMember(poolMember infrav1.VPCLoa } // CreateVPCLoadBalancerPoolMember creates a new pool member and adds it to the load balancer pool. -func (m *MachineScope) CreateVPCLoadBalancerPoolMember(internalIP *string, targetPort int64) (*vpcv1.LoadBalancerPoolMember, error) { +func (m *MachineScope) CreateVPCLoadBalancerPoolMember(ctx context.Context, internalIP *string, targetPort int64) (*vpcv1.LoadBalancerPoolMember, error) { + log := ctrl.LoggerFrom(ctx) loadBalancer, _, err := m.IBMVPCClient.GetLoadBalancer(&vpcv1.GetLoadBalancerOptions{ ID: m.IBMVPCCluster.Status.VPCEndpoint.LBID, }) @@ -706,7 +711,7 @@ func (m *MachineScope) CreateVPCLoadBalancerPoolMember(internalIP *string, targe if _, ok := member.Target.(*vpcv1.LoadBalancerPoolMemberTarget); ok { mtarget := member.Target.(*vpcv1.LoadBalancerPoolMemberTarget) if *mtarget.Address == *internalIP && *member.Port == targetPort { - m.Logger.V(3).Info("PoolMember already exist") + log.V(3).Info("PoolMember already exists") return nil, nil } } @@ -720,15 +725,16 @@ func (m *MachineScope) CreateVPCLoadBalancerPoolMember(internalIP *string, targe } // DeleteVPCLoadBalancerPoolMember deletes a pool member from the load balancer pool. -func (m *MachineScope) DeleteVPCLoadBalancerPoolMember() error { +func (m *MachineScope) DeleteVPCLoadBalancerPoolMember(ctx context.Context) error { + log := ctrl.LoggerFrom(ctx) if m.IBMVPCMachine.Status.InstanceID == "" { - m.Info("instance is not created, ignore deleting load balancer pool member") + log.Info("Instance is not created, ignoring load balancer pool member deletion") return nil } // If the Machine has Load Balancer Pool Members defined in its Status (part of extended VPC Machine support), process the removal of those members versus the legacy single LB design. if len(m.IBMVPCMachine.Status.LoadBalancerPoolMembers) > 0 { - return m.deleteVPCLoadBalancerPoolMembers() + return m.deleteVPCLoadBalancerPoolMembers(ctx) } loadBalancer, _, err := m.IBMVPCClient.GetLoadBalancer(&vpcv1.GetLoadBalancerOptions{ @@ -782,7 +788,8 @@ func (m *MachineScope) DeleteVPCLoadBalancerPoolMember() error { // deleteVPCLoadBalancerPoolMembers provides support to delete Load Balancer Pools Members for a Machine that are tracked in the Machine's Status, which is part of the extended VPC Machine support.
// This new support allows a Machine to have members in multiple Load Balancers, as defined by the Machine Spec, rather than defaulting (legacy) to the single Cluster Load Balancer. -func (m *MachineScope) deleteVPCLoadBalancerPoolMembers() error { +func (m *MachineScope) deleteVPCLoadBalancerPoolMembers(ctx context.Context) error { + log := ctrl.LoggerFrom(ctx) // Retrieve the Instance details immediately (without them the member cannot be safely deleted). instanceOptions := &vpcv1.GetInstanceOptions{ ID: ptr.To(m.IBMVPCMachine.Status.InstanceID), } @@ -795,7 +802,7 @@ func (m *MachineScope) deleteVPCLoadBalancerPoolMembers() error { if instanceDetails.PrimaryNetworkInterface == nil || instanceDetails.PrimaryNetworkInterface.PrimaryIP == nil || instanceDetails.PrimaryNetworkInterface.PrimaryIP.Address == nil { return fmt.Errorf("error instance is missing the primary network interface IP address for load balancer pool member deletion for machine: %s", m.IBMVPCMachine.Name) } - m.Logger.V(5).Info("collected instance details for load balancer pool member deletion", "machienName", m.IBMVPCMachine.Name, "instanceID", *instanceDetails.ID, "instanceIP", *instanceDetails.PrimaryNetworkInterface.PrimaryIP.Address) + log.V(5).Info("collected instance details for load balancer pool member deletion", "machineName", m.IBMVPCMachine.Name, "instanceID", *instanceDetails.ID, "instanceIP", *instanceDetails.PrimaryNetworkInterface.PrimaryIP.Address) cleanupIncomplete := false for _, member := range m.IBMVPCMachine.Status.LoadBalancerPoolMembers { @@ -813,21 +820,21 @@ func (m *MachineScope) deleteVPCLoadBalancerPoolMembers() error { if err != nil { return fmt.Errorf("error retrieving load balancer for load balancer pool member deletion for machine %s: %w", m.IBMVPCMachine.Name, err) } - m.Logger.V(5).Info("collected load balancer for load balancer pool member deletion", "machineName", m.IBMVPCMachine.Name, "loadBalancerID", *loadBalancerDetails.ID) + log.V(5).Info("collected load balancer for load balancer pool member deletion", "machineName", m.IBMVPCMachine.Name, "loadBalancerID", *loadBalancerDetails.ID) // Lookup the Load Balancer Pool ID, if only name is available.
loadBalancerPoolID, err := m.getLoadBalancerPoolID(ptr.To(member.Pool), *loadBalancerDetails.ID) if err != nil { return fmt.Errorf("error retrieving load balancer pool id for load balancer pool member deletion for machine %s: %w", m.IBMVPCMachine.Name, err) } - m.Logger.V(5).Info("collected load balancer pool id for load balancer pool member deletion", "machineName", m.IBMVPCMachine.Name, "loadBalancerPoolID", *loadBalancerPoolID) + log.V(5).Info("collected load balancer pool id for load balancer pool member deletion", "machineName", m.IBMVPCMachine.Name, "loadBalancerPoolID", *loadBalancerPoolID) listMembersOptions := &vpcv1.ListLoadBalancerPoolMembersOptions{ LoadBalancerID: loadBalancerDetails.ID, PoolID: loadBalancerPoolID, } - m.Logger.V(5).Info("list load balancer pool members options", "machineName", m.IBMVPCMachine.Name, "options", *listMembersOptions) + log.V(5).Info("list load balancer pool members options", "machineName", m.IBMVPCMachine.Name, "options", *listMembersOptions) poolMembers, _, err := m.IBMVPCClient.ListLoadBalancerPoolMembers(listMembersOptions) if err != nil { return fmt.Errorf("error retrieving load balancer pool members for load balancer pool member deletion for machine %s: %w", m.IBMVPCMachine.Name, err) @@ -840,10 +847,10 @@ func (m *MachineScope) deleteVPCLoadBalancerPoolMembers() error { continue } - m.Logger.V(3).Info("found load balancer pool member to delete", "machineName", m.IBMVPCMachine.Name, "poolMemberID", *poolMember.ID) + log.V(3).Info("found load balancer pool member to delete", "machineName", m.IBMVPCMachine.Name, "poolMemberID", *poolMember.ID) // Make LB status check now that it has been determined a change is required. if *loadBalancerDetails.ProvisioningStatus != string(infrav1.VPCLoadBalancerStateActive) { - m.Logger.V(5).Info("load balancer not in active status prior to load balancer pool member deletion", "machineName", m.IBMVPCMachine.Name, "loadBalancerID", *loadBalancerDetails.ID, "loadBalancerProvisioningStatus", *loadBalancerDetails.ProvisioningStatus) + log.V(5).Info("load balancer not in active status prior to load balancer pool member deletion", "machineName", m.IBMVPCMachine.Name, "loadBalancerID", *loadBalancerDetails.ID, "loadBalancerProvisioningStatus", *loadBalancerDetails.ProvisioningStatus) // Set flag that some cleanup was not completed, and break out of member target loop, to try next member from Machine Status. cleanupIncomplete = true break @@ -855,13 +862,13 @@ func (m *MachineScope) deleteVPCLoadBalancerPoolMembers() error { PoolID: loadBalancerPoolID, } - m.Logger.V(5).Info("delete load balancer pool member options", "machineName", m.IBMVPCMachine.Name, "options", *deleteOptions) + log.V(5).Info("delete load balancer pool member options", "machineName", m.IBMVPCMachine.Name, "options", *deleteOptions) // Delete the matching Load Balancer Pool Member. 
_, err := m.IBMVPCClient.DeleteLoadBalancerPoolMember(deleteOptions) if err != nil { return fmt.Errorf("error deleting load balancer pool member for machine: %s: %w", m.IBMVPCMachine.Name, err) } - m.Logger.V(3).Info("deleted load balancer pool member", "machineName", m.IBMVPCMachine.Name, "loadBalancerID", *loadBalancerDetails.ID, "loadBalancerPoolID", *loadBalancerPoolID, "loadBalancerPoolMemberID", *poolMember.ID) + log.V(3).Info("deleted load balancer pool member", "machineName", m.IBMVPCMachine.Name, "loadBalancerID", *loadBalancerDetails.ID, "loadBalancerPoolID", *loadBalancerPoolID, "loadBalancerPoolMemberID", *poolMember.ID) } } @@ -902,7 +909,8 @@ func (m *MachineScope) GetBootstrapData() (string, error) { return string(value), nil } -func fetchKeyID(key *infrav1.IBMVPCResourceReference, m *MachineScope) (*string, error) { +func fetchKeyID(ctx context.Context, key *infrav1.IBMVPCResourceReference, m *MachineScope) (*string, error) { + log := ctrl.LoggerFrom(ctx) if key.ID == nil && key.Name == nil { return nil, fmt.Errorf("both ID and Name can't be nil") } @@ -921,8 +929,7 @@ func fetchKeyID(key *infrav1.IBMVPCResourceReference, m *MachineScope) (*string, keysList, _, err := m.IBMVPCClient.ListKeys(listKeysOptions) if err != nil { - m.Logger.Error(err, "Failed to get keys") - return false, "", err + return false, "", fmt.Errorf("failed to get keys: %w", err) } if keysList == nil { @@ -931,7 +938,7 @@ func fetchKeyID(key *infrav1.IBMVPCResourceReference, m *MachineScope) (*string, for i, ks := range keysList.Keys { if *ks.Name == *key.Name { - m.Logger.V(3).Info("Key found with ID", "Key", *ks.Name, "ID", *ks.ID) + log.V(3).Info("Key found with ID", "Key", *ks.Name, "ID", *ks.ID) k = &keysList.Keys[i] return true, "", nil } @@ -954,7 +961,8 @@ func fetchKeyID(key *infrav1.IBMVPCResourceReference, m *MachineScope) (*string, return nil, fmt.Errorf("sshkey does not exist - failed to find Key ID") } -func fetchImageID(image *infrav1.IBMVPCResourceReference, m *MachineScope) (*string, error) { +func fetchImageID(ctx context.Context, image *infrav1.IBMVPCResourceReference, m *MachineScope) (*string, error) { + log := ctrl.LoggerFrom(ctx) if image.ID == nil && image.Name == nil { return nil, fmt.Errorf("both ID and Name can't be nil") } @@ -979,8 +987,7 @@ func fetchImageID(image *infrav1.IBMVPCResourceReference, m *MachineScope) (*str imagesList, _, err := m.IBMVPCClient.ListImages(listImagesOptions) if err != nil { - m.Logger.Error(err, "Failed to get images") - return false, "", err + return false, "", fmt.Errorf("failed to get images: %w", err) } if imagesList == nil { @@ -989,7 +996,7 @@ func fetchImageID(image *infrav1.IBMVPCResourceReference, m *MachineScope) (*str for j, i := range imagesList.Images { if *image.Name == *i.Name { - m.Logger.Info("Image found with ID", "Image", *i.Name, "ID", *i.ID) + log.Info("Image found with ID", "Image", *i.Name, "ID", *i.ID) img = &imagesList.Images[j] return true, "", nil } @@ -1089,8 +1096,7 @@ func (m *MachineScope) SetProviderID(id *string) error { if options.ProviderIDFormatType(options.ProviderIDFormat) == options.ProviderIDFormatV2 { accountID, err := utils.GetAccountIDWrapper() if err != nil { - m.Logger.Error(err, "failed to get cloud account id", err.Error()) - return err + return fmt.Errorf("failed to get cloud account id: %w", err) } m.IBMVPCMachine.Spec.ProviderID = ptr.To(fmt.Sprintf("ibm://%s///%s/%s", accountID, m.Machine.Spec.ClusterName, *id)) } else { diff --git a/cloud/scope/machine_test.go b/cloud/scope/machine_test.go 
index adf022e62..0ee0a43c9 100644 --- a/cloud/scope/machine_test.go +++ b/cloud/scope/machine_test.go @@ -29,7 +29,6 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes/scheme" - "k8s.io/klog/v2" "k8s.io/utils/ptr" clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/controller-runtime/pkg/client" @@ -79,7 +78,6 @@ func setupMachineScope(clusterName string, machineName string, mockvpc *mock.Moc client := fake.NewClientBuilder().WithScheme(scheme.Scheme).WithObjects(initObjects...).Build() return &MachineScope{ Client: client, - Logger: klog.Background(), IBMVPCClient: mockvpc, Cluster: cluster, Machine: machine, @@ -197,7 +195,7 @@ func TestCreateMachine(t *testing.T) { mockvpc.EXPECT().ListInstances(gomock.AssignableToTypeOf(&vpcv1.ListInstancesOptions{})).Return(&vpcv1.InstanceCollection{}, &core.DetailedResponse{}, nil) mockvpc.EXPECT().GetVPCSubnetByName(vpcMachine.Spec.PrimaryNetworkInterface.Subnet).Return(&vpcv1.Subnet{ID: core.StringPtr("subnet-name")}, nil) mockvpc.EXPECT().CreateInstance(gomock.AssignableToTypeOf(&vpcv1.CreateInstanceOptions{})).Return(instance, &core.DetailedResponse{}, nil) - out, err := scope.CreateMachine() + out, err := scope.CreateMachine(ctx) g.Expect(err).To(BeNil()) require.Equal(t, expectedOutput, out) }) @@ -218,7 +216,7 @@ func TestCreateMachine(t *testing.T) { }, } mockvpc.EXPECT().ListInstances(gomock.AssignableToTypeOf(&vpcv1.ListInstancesOptions{})).Return(instanceCollection, &core.DetailedResponse{}, nil) - out, err := scope.CreateMachine() + out, err := scope.CreateMachine(ctx) g.Expect(err).To(BeNil()) require.Equal(t, expectedOutput, out) }) @@ -229,7 +227,7 @@ func TestCreateMachine(t *testing.T) { t.Cleanup(mockController.Finish) scope := setupMachineScope(clusterName, machineName, mockvpc) mockvpc.EXPECT().ListInstances(gomock.AssignableToTypeOf(&vpcv1.ListInstancesOptions{})).Return(&vpcv1.InstanceCollection{}, &core.DetailedResponse{}, errors.New("Error when listing instances")) - _, err := scope.CreateMachine() + _, err := scope.CreateMachine(ctx) g.Expect(err).To(Not(BeNil())) }) @@ -240,7 +238,7 @@ func TestCreateMachine(t *testing.T) { scope := setupMachineScope(clusterName, machineName, mockvpc) scope.Machine.Spec.Bootstrap.DataSecretName = nil mockvpc.EXPECT().ListInstances(gomock.AssignableToTypeOf(&vpcv1.ListInstancesOptions{})).Return(&vpcv1.InstanceCollection{}, &core.DetailedResponse{}, nil) - _, err := scope.CreateMachine() + _, err := scope.CreateMachine(ctx) g.Expect(err).To(Not(BeNil())) }) @@ -251,7 +249,7 @@ func TestCreateMachine(t *testing.T) { scope := setupMachineScope(clusterName, machineName, mockvpc) scope.Machine.Spec.Bootstrap.DataSecretName = core.StringPtr("foo-secret-temp") mockvpc.EXPECT().ListInstances(gomock.AssignableToTypeOf(&vpcv1.ListInstancesOptions{})).Return(&vpcv1.InstanceCollection{}, &core.DetailedResponse{}, nil) - _, err := scope.CreateMachine() + _, err := scope.CreateMachine(ctx) g.Expect(err).To(Not(BeNil())) }) @@ -273,7 +271,7 @@ func TestCreateMachine(t *testing.T) { }} g.Expect(scope.Client.Update(context.Background(), secret)).To(Succeed()) mockvpc.EXPECT().ListInstances(gomock.AssignableToTypeOf(&vpcv1.ListInstancesOptions{})).Return(&vpcv1.InstanceCollection{}, &core.DetailedResponse{}, nil) - _, err := scope.CreateMachine() + _, err := scope.CreateMachine(ctx) g.Expect(err).To(Not(BeNil())) }) @@ -286,7 +284,7 @@ func TestCreateMachine(t *testing.T) { 
mockvpc.EXPECT().ListInstances(gomock.AssignableToTypeOf(&vpcv1.ListInstancesOptions{})).Return(&vpcv1.InstanceCollection{}, &core.DetailedResponse{}, nil) mockvpc.EXPECT().GetVPCSubnetByName(vpcMachine.Spec.PrimaryNetworkInterface.Subnet).Return(&vpcv1.Subnet{ID: core.StringPtr("subnet-id")}, nil) mockvpc.EXPECT().CreateInstance(gomock.AssignableToTypeOf(&vpcv1.CreateInstanceOptions{})).Return(nil, &core.DetailedResponse{}, errors.New("Failed when creating instance")) - _, err := scope.CreateMachine() + _, err := scope.CreateMachine(ctx) g.Expect(err).To(Not(BeNil())) }) @@ -319,7 +317,7 @@ func TestCreateMachine(t *testing.T) { // TODO(cjschaef): Enhance the mock Options parameter to validate the Network Status ControlPlaneSubnets ID was used. mockvpc.EXPECT().CreateInstance(gomock.AssignableToTypeOf(&vpcv1.CreateInstanceOptions{})).Return(instance, &core.DetailedResponse{}, nil) - out, err := scope.CreateMachine() + out, err := scope.CreateMachine(ctx) g.Expect(err).To(BeNil()) require.Equal(t, expectedOutput, out) }) @@ -363,7 +361,7 @@ func TestCreateMachine(t *testing.T) { // TODO(cjschaef): Enhance the mock Options parameter to validate the Network Status Security Group ID was used. mockvpc.EXPECT().CreateInstance(gomock.AssignableToTypeOf(&vpcv1.CreateInstanceOptions{})).Return(instance, &core.DetailedResponse{}, nil) - out, err := scope.CreateMachine() + out, err := scope.CreateMachine(ctx) g.Expect(err).To(BeNil()) require.Equal(t, expectedOutput, out) }) @@ -394,7 +392,7 @@ func TestCreateMachine(t *testing.T) { mockvpc.EXPECT().GetSecurityGroupByName("security-group-1").Return(&vpcv1.SecurityGroup{ID: core.StringPtr("security-group-id-1")}, nil) mockvpc.EXPECT().CreateInstance(gomock.AssignableToTypeOf(&vpcv1.CreateInstanceOptions{})).Return(instance, &core.DetailedResponse{}, nil) - out, err := scope.CreateMachine() + out, err := scope.CreateMachine(ctx) g.Expect(err).To(BeNil()) require.Equal(t, expectedOutput, out) }) @@ -425,7 +423,7 @@ func TestCreateMachine(t *testing.T) { mockvpc.EXPECT().GetSecurityGroup(gomock.AssignableToTypeOf(&vpcv1.GetSecurityGroupOptions{})).Return(&vpcv1.SecurityGroup{ID: core.StringPtr("security-group-id-1")}, &core.DetailedResponse{}, nil) mockvpc.EXPECT().CreateInstance(gomock.AssignableToTypeOf(&vpcv1.CreateInstanceOptions{})).Return(instance, &core.DetailedResponse{}, nil) - out, err := scope.CreateMachine() + out, err := scope.CreateMachine(ctx) g.Expect(err).To(BeNil()) require.Equal(t, expectedOutput, out) }) @@ -455,7 +453,7 @@ func TestCreateMachine(t *testing.T) { // TODO(cjschaef): Enhance the mock Options parameter to validate the Network Status VPC ID was used. 
mockvpc.EXPECT().CreateInstance(gomock.AssignableToTypeOf(&vpcv1.CreateInstanceOptions{})).Return(instance, &core.DetailedResponse{}, nil) - out, err := scope.CreateMachine() + out, err := scope.CreateMachine(ctx) g.Expect(err).To(BeNil()) require.Equal(t, expectedOutput, out) }) @@ -471,7 +469,7 @@ func TestCreateMachine(t *testing.T) { } scope.IBMVPCMachine.Spec = vpcMachine.Spec mockvpc.EXPECT().ListInstances(gomock.AssignableToTypeOf(&vpcv1.ListInstancesOptions{})).Return(&vpcv1.InstanceCollection{}, &core.DetailedResponse{}, nil) - _, err := scope.CreateMachine() + _, err := scope.CreateMachine(ctx) g.Expect(err).To(Not(BeNil())) }) @@ -497,7 +495,7 @@ func TestCreateMachine(t *testing.T) { scope.IBMVPCMachine.Spec = vpcMachine.Spec mockvpc.EXPECT().ListInstances(gomock.AssignableToTypeOf(&vpcv1.ListInstancesOptions{})).Return(&vpcv1.InstanceCollection{}, &core.DetailedResponse{}, nil) mockvpc.EXPECT().GetVPCSubnetByName(vpcMachine.Spec.PrimaryNetworkInterface.Subnet).Return(&vpcv1.Subnet{ID: core.StringPtr("subnet-id")}, nil) - _, err := scope.CreateMachine() + _, err := scope.CreateMachine(ctx) g.Expect(err).To(Not(BeNil())) }) @@ -526,7 +524,7 @@ func TestCreateMachine(t *testing.T) { mockvpc.EXPECT().ListInstances(gomock.AssignableToTypeOf(&vpcv1.ListInstancesOptions{})).Return(&vpcv1.InstanceCollection{}, &core.DetailedResponse{}, nil) mockvpc.EXPECT().GetVPCSubnetByName(vpcMachine.Spec.PrimaryNetworkInterface.Subnet).Return(&vpcv1.Subnet{ID: core.StringPtr("subnet-id")}, nil) mockvpc.EXPECT().ListKeys(gomock.AssignableToTypeOf(&vpcv1.ListKeysOptions{})).Return(nil, &core.DetailedResponse{}, errors.New("Failed when creating instance")) - _, err := scope.CreateMachine() + _, err := scope.CreateMachine(ctx) g.Expect(err).To(Not(BeNil())) }) @@ -563,7 +561,7 @@ func TestCreateMachine(t *testing.T) { mockvpc.EXPECT().ListInstances(gomock.AssignableToTypeOf(&vpcv1.ListInstancesOptions{})).Return(&vpcv1.InstanceCollection{}, &core.DetailedResponse{}, nil) mockvpc.EXPECT().GetVPCSubnetByName(vpcMachine.Spec.PrimaryNetworkInterface.Subnet).Return(&vpcv1.Subnet{ID: core.StringPtr("subnet-id")}, nil) mockvpc.EXPECT().ListKeys(gomock.AssignableToTypeOf(&vpcv1.ListKeysOptions{})).Return(keyCollection, &core.DetailedResponse{}, nil) - _, err := scope.CreateMachine() + _, err := scope.CreateMachine(ctx) g.Expect(err).To(Not(BeNil())) }) @@ -616,7 +614,7 @@ func TestCreateMachine(t *testing.T) { mockvpc.EXPECT().ListImages(gomock.AssignableToTypeOf(&vpcv1.ListImagesOptions{})).Return(imageCollection, &core.DetailedResponse{}, nil) mockvpc.EXPECT().ListKeys(gomock.AssignableToTypeOf(&vpcv1.ListKeysOptions{})).Return(keyCollection, &core.DetailedResponse{}, nil) mockvpc.EXPECT().CreateInstance(gomock.AssignableToTypeOf(&vpcv1.CreateInstanceOptions{})).Return(instance, &core.DetailedResponse{}, nil) - out, err := scope.CreateMachine() + out, err := scope.CreateMachine(ctx) g.Expect(err).To(BeNil()) require.Equal(t, expectedOutput, out) }) @@ -638,7 +636,7 @@ func TestCreateMachine(t *testing.T) { scope.IBMVPCMachine.Spec = vpcMachine.Spec mockvpc.EXPECT().ListInstances(gomock.AssignableToTypeOf(&vpcv1.ListInstancesOptions{})).Return(&vpcv1.InstanceCollection{}, &core.DetailedResponse{}, nil) mockvpc.EXPECT().GetVPCSubnetByName(vpcMachine.Spec.PrimaryNetworkInterface.Subnet).Return(&vpcv1.Subnet{ID: core.StringPtr("subnet-id")}, nil) - _, err := scope.CreateMachine() + _, err := scope.CreateMachine(ctx) g.Expect(err).To(Not(BeNil())) }) @@ -662,7 +660,7 @@ func TestCreateMachine(t *testing.T) { 
mockvpc.EXPECT().ListInstances(gomock.AssignableToTypeOf(&vpcv1.ListInstancesOptions{})).Return(&vpcv1.InstanceCollection{}, &core.DetailedResponse{}, nil) mockvpc.EXPECT().GetVPCSubnetByName(vpcMachine.Spec.PrimaryNetworkInterface.Subnet).Return(&vpcv1.Subnet{ID: core.StringPtr("subnet-id")}, nil) mockvpc.EXPECT().ListImages(gomock.AssignableToTypeOf(&vpcv1.ListImagesOptions{})).Return(nil, &core.DetailedResponse{}, errors.New("Failed when listing Images")) - _, err := scope.CreateMachine() + _, err := scope.CreateMachine(ctx) g.Expect(err).To(Not(BeNil())) }) @@ -694,7 +692,7 @@ func TestCreateMachine(t *testing.T) { mockvpc.EXPECT().ListInstances(gomock.AssignableToTypeOf(&vpcv1.ListInstancesOptions{})).Return(&vpcv1.InstanceCollection{}, &core.DetailedResponse{}, nil) mockvpc.EXPECT().GetVPCSubnetByName(vpcMachine.Spec.PrimaryNetworkInterface.Subnet).Return(&vpcv1.Subnet{ID: core.StringPtr("subnet-id")}, nil) mockvpc.EXPECT().ListImages(gomock.AssignableToTypeOf(&vpcv1.ListImagesOptions{})).Return(imageCollection, &core.DetailedResponse{}, nil) - _, err := scope.CreateMachine() + _, err := scope.CreateMachine(ctx) g.Expect(err).To(Not(BeNil())) }) @@ -731,7 +729,7 @@ func TestCreateMachine(t *testing.T) { mockvpc.EXPECT().ListInstances(gomock.AssignableToTypeOf(&vpcv1.ListInstancesOptions{})).Return(&vpcv1.InstanceCollection{}, &core.DetailedResponse{}, nil) mockvpc.EXPECT().GetVPCSubnetByName(vpcMachine.Spec.PrimaryNetworkInterface.Subnet).Return(&vpcv1.Subnet{ID: core.StringPtr("subnet-id")}, nil) mockvpc.EXPECT().CreateInstance(gomock.AssignableToTypeOf(&vpcv1.CreateInstanceOptions{})).Return(instance, &core.DetailedResponse{}, nil) - out, err := scope.CreateMachine() + out, err := scope.CreateMachine(ctx) g.Expect(err).To(BeNil()) require.Equal(t, expectedOutput, out) }) @@ -828,7 +826,7 @@ func TestCreateVPCLoadBalancerPoolMember(t *testing.T) { scope.IBMVPCMachine.Spec = vpcMachine.Spec scope.IBMVPCMachine.Status = vpcMachine.Status mockvpc.EXPECT().GetLoadBalancer(gomock.AssignableToTypeOf(&vpcv1.GetLoadBalancerOptions{})).Return(&vpcv1.LoadBalancer{}, &core.DetailedResponse{}, errors.New("Could not fetch LoadBalancer")) - _, err := scope.CreateVPCLoadBalancerPoolMember(&scope.IBMVPCMachine.Status.Addresses[0].Address, int64(infrav1.DefaultAPIServerPort)) + _, err := scope.CreateVPCLoadBalancerPoolMember(ctx, &scope.IBMVPCMachine.Status.Addresses[0].Address, int64(infrav1.DefaultAPIServerPort)) g.Expect(err).To(Not(BeNil())) }) t.Run("Error when LoadBalancer is not active", func(t *testing.T) { @@ -843,7 +841,7 @@ func TestCreateVPCLoadBalancerPoolMember(t *testing.T) { ProvisioningStatus: core.StringPtr("pending"), } mockvpc.EXPECT().GetLoadBalancer(gomock.AssignableToTypeOf(&vpcv1.GetLoadBalancerOptions{})).Return(loadBalancer, &core.DetailedResponse{}, nil) - _, err := scope.CreateVPCLoadBalancerPoolMember(&scope.IBMVPCMachine.Status.Addresses[0].Address, int64(infrav1.DefaultAPIServerPort)) + _, err := scope.CreateVPCLoadBalancerPoolMember(ctx, &scope.IBMVPCMachine.Status.Addresses[0].Address, int64(infrav1.DefaultAPIServerPort)) g.Expect(err).To(Not(BeNil())) }) t.Run("Error when no pool exist", func(t *testing.T) { @@ -859,7 +857,7 @@ func TestCreateVPCLoadBalancerPoolMember(t *testing.T) { Pools: []vpcv1.LoadBalancerPoolReference{}, } mockvpc.EXPECT().GetLoadBalancer(gomock.AssignableToTypeOf(&vpcv1.GetLoadBalancerOptions{})).Return(loadBalancer, &core.DetailedResponse{}, nil) - _, err := 
scope.CreateVPCLoadBalancerPoolMember(&scope.IBMVPCMachine.Status.Addresses[0].Address, int64(infrav1.DefaultAPIServerPort)) + _, err := scope.CreateVPCLoadBalancerPoolMember(ctx, &scope.IBMVPCMachine.Status.Addresses[0].Address, int64(infrav1.DefaultAPIServerPort)) g.Expect(err).To(Not(BeNil())) }) t.Run("Error when listing LoadBalancerPoolMembers", func(t *testing.T) { @@ -871,7 +869,7 @@ func TestCreateVPCLoadBalancerPoolMember(t *testing.T) { scope.IBMVPCMachine.Status = vpcMachine.Status mockvpc.EXPECT().GetLoadBalancer(gomock.AssignableToTypeOf(&vpcv1.GetLoadBalancerOptions{})).Return(loadBalancer, &core.DetailedResponse{}, nil) mockvpc.EXPECT().ListLoadBalancerPoolMembers(gomock.AssignableToTypeOf(&vpcv1.ListLoadBalancerPoolMembersOptions{})).Return(&vpcv1.LoadBalancerPoolMemberCollection{}, &core.DetailedResponse{}, errors.New("Failed to list LoadBalancerPoolMembers")) - _, err := scope.CreateVPCLoadBalancerPoolMember(&scope.IBMVPCMachine.Status.Addresses[0].Address, int64(infrav1.DefaultAPIServerPort)) + _, err := scope.CreateVPCLoadBalancerPoolMember(ctx, &scope.IBMVPCMachine.Status.Addresses[0].Address, int64(infrav1.DefaultAPIServerPort)) g.Expect(err).To(Not(BeNil())) }) t.Run("PoolMember already exist", func(t *testing.T) { @@ -893,7 +891,7 @@ func TestCreateVPCLoadBalancerPoolMember(t *testing.T) { } mockvpc.EXPECT().GetLoadBalancer(gomock.AssignableToTypeOf(&vpcv1.GetLoadBalancerOptions{})).Return(loadBalancer, &core.DetailedResponse{}, nil) mockvpc.EXPECT().ListLoadBalancerPoolMembers(gomock.AssignableToTypeOf(&vpcv1.ListLoadBalancerPoolMembersOptions{})).Return(loadBalancerPoolMemberCollection, &core.DetailedResponse{}, nil) - _, err := scope.CreateVPCLoadBalancerPoolMember(&scope.IBMVPCMachine.Status.Addresses[0].Address, int64(infrav1.DefaultAPIServerPort)) + _, err := scope.CreateVPCLoadBalancerPoolMember(ctx, &scope.IBMVPCMachine.Status.Addresses[0].Address, int64(infrav1.DefaultAPIServerPort)) g.Expect(err).To(BeNil()) }) t.Run("Error when creating LoadBalancerPoolMember", func(t *testing.T) { @@ -906,7 +904,7 @@ func TestCreateVPCLoadBalancerPoolMember(t *testing.T) { mockvpc.EXPECT().GetLoadBalancer(gomock.AssignableToTypeOf(&vpcv1.GetLoadBalancerOptions{})).Return(loadBalancer, &core.DetailedResponse{}, nil) mockvpc.EXPECT().ListLoadBalancerPoolMembers(gomock.AssignableToTypeOf(&vpcv1.ListLoadBalancerPoolMembersOptions{})).Return(&vpcv1.LoadBalancerPoolMemberCollection{}, &core.DetailedResponse{}, nil) mockvpc.EXPECT().CreateLoadBalancerPoolMember(gomock.AssignableToTypeOf(&vpcv1.CreateLoadBalancerPoolMemberOptions{})).Return(&vpcv1.LoadBalancerPoolMember{}, &core.DetailedResponse{}, errors.New("Failed to create LoadBalancerPoolMember")) - _, err := scope.CreateVPCLoadBalancerPoolMember(&scope.IBMVPCMachine.Status.Addresses[0].Address, int64(64)) + _, err := scope.CreateVPCLoadBalancerPoolMember(ctx, &scope.IBMVPCMachine.Status.Addresses[0].Address, int64(64)) g.Expect(err).To(Not(BeNil())) }) t.Run("Should create VPCLoadBalancerPoolMember", func(t *testing.T) { @@ -927,7 +925,7 @@ func TestCreateVPCLoadBalancerPoolMember(t *testing.T) { mockvpc.EXPECT().GetLoadBalancer(gomock.AssignableToTypeOf(&vpcv1.GetLoadBalancerOptions{})).Return(loadBalancer, &core.DetailedResponse{}, nil) mockvpc.EXPECT().ListLoadBalancerPoolMembers(gomock.AssignableToTypeOf(&vpcv1.ListLoadBalancerPoolMembersOptions{})).Return(&vpcv1.LoadBalancerPoolMemberCollection{}, &core.DetailedResponse{}, nil) 
mockvpc.EXPECT().CreateLoadBalancerPoolMember(gomock.AssignableToTypeOf(&vpcv1.CreateLoadBalancerPoolMemberOptions{})).Return(loadBalancerPoolMember, &core.DetailedResponse{}, nil) - out, err := scope.CreateVPCLoadBalancerPoolMember(&scope.IBMVPCMachine.Status.Addresses[0].Address, int64(infrav1.DefaultAPIServerPort)) + out, err := scope.CreateVPCLoadBalancerPoolMember(ctx, &scope.IBMVPCMachine.Status.Addresses[0].Address, int64(infrav1.DefaultAPIServerPort)) g.Expect(err).To(BeNil()) require.Equal(t, expectedOutput, out) }) @@ -992,7 +990,7 @@ func TestDeleteVPCLoadBalancerPoolMember(t *testing.T) { scope.IBMVPCMachine.Spec = vpcMachine.Spec scope.IBMVPCMachine.Status = vpcMachine.Status mockvpc.EXPECT().GetLoadBalancer(gomock.AssignableToTypeOf(&vpcv1.GetLoadBalancerOptions{})).Return(&vpcv1.LoadBalancer{}, &core.DetailedResponse{}, errors.New("Could not fetch LoadBalancer")) - err := scope.DeleteVPCLoadBalancerPoolMember() + err := scope.DeleteVPCLoadBalancerPoolMember(ctx) g.Expect(err).To(Not(BeNil())) }) t.Run("No pools associated with load balancer", func(t *testing.T) { @@ -1003,7 +1001,7 @@ func TestDeleteVPCLoadBalancerPoolMember(t *testing.T) { scope.IBMVPCMachine.Spec = vpcMachine.Spec scope.IBMVPCMachine.Status = vpcMachine.Status mockvpc.EXPECT().GetLoadBalancer(gomock.AssignableToTypeOf(&vpcv1.GetLoadBalancerOptions{})).Return(&vpcv1.LoadBalancer{}, &core.DetailedResponse{}, nil) - err := scope.DeleteVPCLoadBalancerPoolMember() + err := scope.DeleteVPCLoadBalancerPoolMember(ctx) g.Expect(err).To(BeNil()) }) t.Run("Error when fetching Instance", func(t *testing.T) { @@ -1015,7 +1013,7 @@ func TestDeleteVPCLoadBalancerPoolMember(t *testing.T) { scope.IBMVPCMachine.Status = vpcMachine.Status mockvpc.EXPECT().GetLoadBalancer(gomock.AssignableToTypeOf(&vpcv1.GetLoadBalancerOptions{})).Return(loadBalancer, &core.DetailedResponse{}, nil) mockvpc.EXPECT().GetInstance(gomock.AssignableToTypeOf(&vpcv1.GetInstanceOptions{})).Return(&vpcv1.Instance{}, &core.DetailedResponse{}, errors.New("Failed to fetch Instance")) - err := scope.DeleteVPCLoadBalancerPoolMember() + err := scope.DeleteVPCLoadBalancerPoolMember(ctx) g.Expect(err).To(Not(BeNil())) }) t.Run("Error when listing LoadBalancerPoolMembers", func(t *testing.T) { @@ -1028,7 +1026,7 @@ func TestDeleteVPCLoadBalancerPoolMember(t *testing.T) { mockvpc.EXPECT().GetLoadBalancer(gomock.AssignableToTypeOf(&vpcv1.GetLoadBalancerOptions{})).Return(loadBalancer, &core.DetailedResponse{}, nil) mockvpc.EXPECT().GetInstance(gomock.AssignableToTypeOf(&vpcv1.GetInstanceOptions{})).Return(&vpcv1.Instance{}, &core.DetailedResponse{}, nil) mockvpc.EXPECT().ListLoadBalancerPoolMembers(gomock.AssignableToTypeOf(&vpcv1.ListLoadBalancerPoolMembersOptions{})).Return(&vpcv1.LoadBalancerPoolMemberCollection{}, &core.DetailedResponse{}, errors.New("Failed to list LoadBalancerPoolMembers")) - err := scope.DeleteVPCLoadBalancerPoolMember() + err := scope.DeleteVPCLoadBalancerPoolMember(ctx) g.Expect(err).To(Not(BeNil())) }) t.Run("No members in load balancer pool", func(t *testing.T) { @@ -1041,7 +1039,7 @@ func TestDeleteVPCLoadBalancerPoolMember(t *testing.T) { mockvpc.EXPECT().GetLoadBalancer(gomock.AssignableToTypeOf(&vpcv1.GetLoadBalancerOptions{})).Return(loadBalancer, &core.DetailedResponse{}, nil) mockvpc.EXPECT().GetInstance(gomock.AssignableToTypeOf(&vpcv1.GetInstanceOptions{})).Return(&vpcv1.Instance{}, &core.DetailedResponse{}, nil) 
mockvpc.EXPECT().ListLoadBalancerPoolMembers(gomock.AssignableToTypeOf(&vpcv1.ListLoadBalancerPoolMembersOptions{})).Return(&vpcv1.LoadBalancerPoolMemberCollection{}, &core.DetailedResponse{}, nil) - err := scope.DeleteVPCLoadBalancerPoolMember() + err := scope.DeleteVPCLoadBalancerPoolMember(ctx) g.Expect(err).To(BeNil()) }) t.Run("Error when load balancer is not in active state", func(t *testing.T) { @@ -1063,7 +1061,7 @@ func TestDeleteVPCLoadBalancerPoolMember(t *testing.T) { mockvpc.EXPECT().GetLoadBalancer(gomock.AssignableToTypeOf(&vpcv1.GetLoadBalancerOptions{})).Return(loadBalancer, &core.DetailedResponse{}, nil) mockvpc.EXPECT().GetInstance(gomock.AssignableToTypeOf(&vpcv1.GetInstanceOptions{})).Return(instance, &core.DetailedResponse{}, nil) mockvpc.EXPECT().ListLoadBalancerPoolMembers(gomock.AssignableToTypeOf(&vpcv1.ListLoadBalancerPoolMembersOptions{})).Return(loadBalancerPoolMemberCollection, &core.DetailedResponse{}, nil) - err := scope.DeleteVPCLoadBalancerPoolMember() + err := scope.DeleteVPCLoadBalancerPoolMember(ctx) g.Expect(err).To(Not(BeNil())) }) t.Run("Error when deleting load balancer pool member", func(t *testing.T) { @@ -1077,7 +1075,7 @@ func TestDeleteVPCLoadBalancerPoolMember(t *testing.T) { mockvpc.EXPECT().GetInstance(gomock.AssignableToTypeOf(&vpcv1.GetInstanceOptions{})).Return(instance, &core.DetailedResponse{}, nil) mockvpc.EXPECT().ListLoadBalancerPoolMembers(gomock.AssignableToTypeOf(&vpcv1.ListLoadBalancerPoolMembersOptions{})).Return(loadBalancerPoolMemberCollection, &core.DetailedResponse{}, nil) mockvpc.EXPECT().DeleteLoadBalancerPoolMember(gomock.AssignableToTypeOf(&vpcv1.DeleteLoadBalancerPoolMemberOptions{})).Return(&core.DetailedResponse{}, errors.New("Failed to delete LoadBalancerPoolMember")) - err := scope.DeleteVPCLoadBalancerPoolMember() + err := scope.DeleteVPCLoadBalancerPoolMember(ctx) g.Expect(err).To(Not(BeNil())) }) t.Run("Should delete load balancer pool", func(t *testing.T) { @@ -1091,7 +1089,7 @@ func TestDeleteVPCLoadBalancerPoolMember(t *testing.T) { mockvpc.EXPECT().GetInstance(gomock.AssignableToTypeOf(&vpcv1.GetInstanceOptions{})).Return(instance, &core.DetailedResponse{}, nil) mockvpc.EXPECT().ListLoadBalancerPoolMembers(gomock.AssignableToTypeOf(&vpcv1.ListLoadBalancerPoolMembersOptions{})).Return(loadBalancerPoolMemberCollection, &core.DetailedResponse{}, nil) mockvpc.EXPECT().DeleteLoadBalancerPoolMember(gomock.AssignableToTypeOf(&vpcv1.DeleteLoadBalancerPoolMemberOptions{})).Return(&core.DetailedResponse{}, nil) - err := scope.DeleteVPCLoadBalancerPoolMember() + err := scope.DeleteVPCLoadBalancerPoolMember(ctx) g.Expect(err).To(BeNil()) }) }) diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_ibmvpcmachines.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_ibmvpcmachines.yaml index 50e8ebdb5..8539b241d 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_ibmvpcmachines.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_ibmvpcmachines.yaml @@ -626,6 +626,74 @@ spec: ready: description: Ready is true when the provider resource is ready. type: boolean + v1beta2: + description: V1Beta2 groups all the fields that will be added or modified + in IBMVPCMachine's status with the V1Beta2 version. + properties: + conditions: + description: Conditions represents the observations of an IBMVPCMachine's + current state. + items: + description: Condition contains details for one aspect of the + current state of this API Resource.
+ properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, + Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + maxItems: 32 + type: array + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + type: object type: object type: object served: true diff --git a/controllers/ibmvpcmachine_controller.go b/controllers/ibmvpcmachine_controller.go index 8d14aa522..11efdcef6 100644 --- a/controllers/ibmvpcmachine_controller.go +++ b/controllers/ibmvpcmachine_controller.go @@ -26,8 +26,11 @@ import ( "github.com/IBM/vpc-go-sdk/vpcv1" apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" + kerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/client-go/tools/record" + "k8s.io/klog/v2" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" @@ -36,7 +39,11 @@ import ( clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" //nolint:staticcheck clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/cluster-api/util" - v1beta1conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" //nolint:staticcheck + v1beta1conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" //nolint:staticcheck + v1beta2conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions/v1beta2" //nolint:staticcheck + v1beta1patch "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/patch" //nolint:staticcheck + "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/paused" + "sigs.k8s.io/cluster-api/util/finalizers" infrav1 "sigs.k8s.io/cluster-api-provider-ibmcloud/api/v1beta2" "sigs.k8s.io/cluster-api-provider-ibmcloud/cloud/scope" @@ -61,12 +68,14 @@ type IBMVPCMachineReconciler struct { // Reconcile implements controller runtime Reconciler interface and handles reconcileation logic for IBMVPCMachine. 
func (r *IBMVPCMachineReconciler) Reconcile(ctx context.Context, req ctrl.Request) (_ ctrl.Result, reterr error) { - log := r.Log.WithValues("ibmvpcmachine", req.NamespacedName) + log := ctrl.LoggerFrom(ctx) - // Fetch the IBMVPCMachine instance. + log.Info("Reconciling IBMVPCMachine") + defer log.Info("Finished reconciling IBMVPCMachine") - ibmVpcMachine := &infrav1.IBMVPCMachine{} - err := r.Get(ctx, req.NamespacedName, ibmVpcMachine) + // Fetch the IBMVPCMachine instance. + ibmVPCMachine := &infrav1.IBMVPCMachine{} + err := r.Get(ctx, req.NamespacedName, ibmVPCMachine) if err != nil { if apierrors.IsNotFound(err) { return ctrl.Result{}, nil @@ -74,7 +83,7 @@ func (r *IBMVPCMachineReconciler) Reconcile(ctx context.Context, req ctrl.Reques return ctrl.Result{}, err } // Fetch the Machine. - machine, err := util.GetOwnerMachine(ctx, r.Client, ibmVpcMachine.ObjectMeta) + machine, err := util.GetOwnerMachine(ctx, r.Client, ibmVPCMachine.ObjectMeta) if err != nil { return ctrl.Result{}, err } @@ -84,54 +93,70 @@ func (r *IBMVPCMachineReconciler) Reconcile(ctx context.Context, req ctrl.Reques } // Fetch the Cluster. - cluster, err := util.GetClusterFromMetadata(ctx, r.Client, ibmVpcMachine.ObjectMeta) + cluster, err := util.GetClusterFromMetadata(ctx, r.Client, ibmVPCMachine.ObjectMeta) if err != nil { log.Info("Machine is missing cluster label or cluster does not exist") return ctrl.Result{}, nil } - log = log.WithValues("cluster", cluster.Name) - - ibmCluster := &infrav1.IBMVPCCluster{} - ibmVpcClusterName := client.ObjectKey{ - Namespace: ibmVpcMachine.Namespace, + ibmVPCCluster := &infrav1.IBMVPCCluster{} + ibmVPCClusterName := client.ObjectKey{ + Namespace: ibmVPCMachine.Namespace, Name: cluster.Spec.InfrastructureRef.Name, } - if err := r.Client.Get(ctx, ibmVpcClusterName, ibmCluster); err != nil { + if err := r.Client.Get(ctx, ibmVPCClusterName, ibmVPCCluster); err != nil { log.Info("IBMVPCCluster is not available yet") return ctrl.Result{}, nil } + // Add finalizer first if not set to avoid the race condition between init and delete. + if finalizerAdded, err := finalizers.EnsureFinalizer(ctx, r.Client, ibmVPCMachine, infrav1.MachineFinalizer); err != nil || finalizerAdded { + return ctrl.Result{}, err + } + + log = log.WithValues("Cluster", klog.KObj(cluster)) + ctx = ctrl.LoggerInto(ctx, log) + + // Initialize the patch helper. + patchHelper, err := v1beta1patch.NewHelper(ibmVPCMachine, r.Client) + if err != nil { + return ctrl.Result{}, fmt.Errorf("failed to initialize patch helper: %w", err) + } + + // Always attempt to Patch the IBMVPCMachine object and status after each reconciliation. + defer func() { + if err := patchIBMVPCMachine(ctx, patchHelper, ibmVPCMachine); err != nil { + reterr = kerrors.NewAggregate([]error{reterr, err}) + } + }() + + if isPaused, requeue, err := paused.EnsurePausedCondition(ctx, r.Client, cluster, ibmVPCMachine); err != nil || isPaused || requeue { + return ctrl.Result{}, err + } + // Create the machine scope. machineScope, err := scope.NewMachineScope(scope.MachineScopeParams{ Client: r.Client, - Logger: log, Cluster: cluster, - IBMVPCCluster: ibmCluster, + IBMVPCCluster: ibmVPCCluster, Machine: machine, - IBMVPCMachine: ibmVpcMachine, + IBMVPCMachine: ibmVPCMachine, ServiceEndpoint: r.ServiceEndpoint, }) if err != nil { return ctrl.Result{}, fmt.Errorf("failed to create scope: %w", err) } - // Always close the scope when exiting this function, so we can persist any IBMVPCMachine changes. 
- defer func() { - if machineScope != nil { - if err := machineScope.Close(); err != nil && reterr == nil { - reterr = err - } - } - }() + log = log.WithValues("IBMVPCMachine", klog.KObj(ibmVPCMachine)) + ctx = ctrl.LoggerInto(ctx, log) // Handle deleted machines. - if !ibmVpcMachine.DeletionTimestamp.IsZero() { - return r.reconcileDelete(machineScope) + if !ibmVPCMachine.DeletionTimestamp.IsZero() { + return r.reconcileDelete(ctx, machineScope) } // Handle non-deleted machines. - return r.reconcileNormal(machineScope) + return r.reconcileNormal(ctx, machineScope) } // SetupWithManager creates a new IBMVPCMachine controller for a manager. @@ -141,14 +166,15 @@ func (r *IBMVPCMachineReconciler) SetupWithManager(mgr ctrl.Manager) error { Complete(r) } -func (r *IBMVPCMachineReconciler) reconcileNormal(machineScope *scope.MachineScope) (ctrl.Result, error) { //nolint:gocyclo +func (r *IBMVPCMachineReconciler) reconcileNormal(ctx context.Context, machineScope *scope.MachineScope) (ctrl.Result, error) { //nolint:gocyclo + log := ctrl.LoggerFrom(ctx) if controllerutil.AddFinalizer(machineScope.IBMVPCMachine, infrav1.MachineFinalizer) { return ctrl.Result{}, nil } // Make sure bootstrap data is available and populated. if machineScope.Machine.Spec.Bootstrap.DataSecretName == nil { - machineScope.Info("Bootstrap data secret reference is not yet available") + log.Info("Bootstrap data secret reference is not yet available") return ctrl.Result{RequeueAfter: 1 * time.Minute}, nil } @@ -158,7 +184,7 @@ func (r *IBMVPCMachineReconciler) reconcileNormal(machineScope *scope.MachineSco } } - instance, err := r.getOrCreate(machineScope) + instance, err := r.getOrCreate(ctx, machineScope) if err != nil { return ctrl.Result{}, fmt.Errorf("failed to reconcile VSI for IBMVPCMachine %s/%s: %w", machineScope.IBMVPCMachine.Namespace, machineScope.IBMVPCMachine.Name, err) } @@ -183,9 +209,34 @@ func (r *IBMVPCMachineReconciler) reconcileNormal(machineScope *scope.MachineSco case vpcv1.InstanceStatusPendingConst: machineScope.SetNotReady() v1beta1conditions.MarkFalse(machineScope.IBMVPCMachine, infrav1.InstanceReadyCondition, infrav1.InstanceNotReadyReason, clusterv1beta1.ConditionSeverityWarning, "") + v1beta2conditions.Set(machineScope.IBMVPCMachine, metav1.Condition{ + Type: infrav1.IBMVPCMachineInstanceReadyV1Beta2Condition, + Status: metav1.ConditionFalse, + Reason: infrav1.IBMVPCMachineInstanceNotReadyV1Beta2Reason, + }) + case vpcv1.InstanceStatusStartingConst: + machineScope.SetNotReady() + v1beta1conditions.MarkFalse(machineScope.IBMVPCMachine, infrav1.InstanceReadyCondition, infrav1.InstanceNotReadyReason, clusterv1beta1.ConditionSeverityWarning, "") + v1beta2conditions.Set(machineScope.IBMVPCMachine, metav1.Condition{ + Type: infrav1.IBMVPCMachineInstanceReadyV1Beta2Condition, + Status: metav1.ConditionFalse, + Reason: infrav1.IBMVPCMachineInstanceNotReadyV1Beta2Reason, + }) case vpcv1.InstanceStatusStoppedConst: machineScope.SetNotReady() v1beta1conditions.MarkFalse(machineScope.IBMVPCMachine, infrav1.InstanceReadyCondition, infrav1.InstanceStoppedReason, clusterv1beta1.ConditionSeverityError, "") + v1beta2conditions.Set(machineScope.IBMVPCMachine, metav1.Condition{ + Type: infrav1.IBMVPCMachineInstanceReadyV1Beta2Condition, + Status: metav1.ConditionFalse, + Reason: infrav1.InstanceStoppedReason, + }) + case vpcv1.InstanceStatusDeletingConst: + v1beta1conditions.MarkFalse(machineScope.IBMVPCMachine, infrav1.InstanceReadyCondition, infrav1.InstanceDeletingReason, clusterv1beta1.ConditionSeverityError, 
"") + v1beta2conditions.Set(machineScope.IBMVPCMachine, metav1.Condition{ + Type: infrav1.IBMVPCMachineInstanceReadyV1Beta2Condition, + Status: metav1.ConditionFalse, + Reason: infrav1.InstanceDeletingReason, + }) case vpcv1.InstanceStatusFailedConst: msg := "" healthReasonsLen := len(instance.HealthReasons) @@ -198,18 +249,33 @@ func (r *IBMVPCMachineReconciler) reconcileNormal(machineScope *scope.MachineSco machineScope.SetFailureReason(infrav1.UpdateMachineError) machineScope.SetFailureMessage(msg) v1beta1conditions.MarkFalse(machineScope.IBMVPCMachine, infrav1.InstanceReadyCondition, infrav1.InstanceErroredReason, clusterv1beta1.ConditionSeverityError, "%s", msg) + v1beta2conditions.Set(machineScope.IBMVPCMachine, metav1.Condition{ + Type: infrav1.IBMVPCMachineInstanceReadyV1Beta2Condition, + Status: metav1.ConditionFalse, + Reason: infrav1.InstanceErroredReason, + }) capibmrecord.Warnf(machineScope.IBMVPCMachine, "FailedBuildInstance", "Failed to build the instance - %s", msg) return ctrl.Result{}, nil case vpcv1.InstanceStatusRunningConst: machineRunning = true default: machineScope.SetNotReady() - machineScope.V(3).Info("unexpected vpc instance status", "instanceStatus", *instance.Status, "instanceID", machineScope.GetInstanceID()) + log.V(3).Info("unexpected vpc instance status", "instanceStatus", *instance.Status, "instanceID", machineScope.GetInstanceID()) v1beta1conditions.MarkUnknown(machineScope.IBMVPCMachine, infrav1.InstanceReadyCondition, "", "") + v1beta2conditions.Set(machineScope.IBMVPCMachine, metav1.Condition{ + Type: infrav1.IBMVPCMachineInstanceReadyV1Beta2Condition, + Status: metav1.ConditionFalse, + Reason: infrav1.InstanceStateUnknownReason, + }) } } else { machineScope.SetNotReady() v1beta1conditions.MarkUnknown(machineScope.IBMVPCMachine, infrav1.InstanceReadyCondition, infrav1.InstanceStateUnknownReason, "") + v1beta2conditions.Set(machineScope.IBMVPCMachine, metav1.Condition{ + Type: infrav1.IBMVPCMachineInstanceReadyV1Beta2Condition, + Status: metav1.ConditionFalse, + Reason: infrav1.InstanceStateUnknownReason, + }) } // Check if the Machine is running. @@ -222,7 +288,7 @@ func (r *IBMVPCMachineReconciler) reconcileNormal(machineScope *scope.MachineSco if len(machineScope.IBMVPCMachine.Spec.LoadBalancerPoolMembers) > 0 { needsRequeue := false for _, poolMember := range machineScope.IBMVPCMachine.Spec.LoadBalancerPoolMembers { - requeue, err := machineScope.ReconcileVPCLoadBalancerPoolMember(poolMember) + requeue, err := machineScope.ReconcileVPCLoadBalancerPoolMember(ctx, poolMember) if err != nil { return ctrl.Result{}, fmt.Errorf("error failed to reconcile machine's pool member: %w", err) } else if requeue { @@ -246,7 +312,7 @@ func (r *IBMVPCMachineReconciler) reconcileNormal(machineScope *scope.MachineSco } internalIP := instance.PrimaryNetworkInterface.PrimaryIP.Address port := int64(machineScope.APIServerPort()) - poolMember, err := machineScope.CreateVPCLoadBalancerPoolMember(internalIP, port) + poolMember, err := machineScope.CreateVPCLoadBalancerPoolMember(ctx, internalIP, port) if err != nil { return ctrl.Result{}, fmt.Errorf("failed to bind port %d to control plane %s/%s: %w", port, machineScope.IBMVPCMachine.Namespace, machineScope.IBMVPCMachine.Name, err) } @@ -259,25 +325,31 @@ func (r *IBMVPCMachineReconciler) reconcileNormal(machineScope *scope.MachineSco // With a running machine and all Load Balancer Pool Members reconciled, mark machine as ready. 
machineScope.SetReady() v1beta1conditions.MarkTrue(machineScope.IBMVPCMachine, infrav1.InstanceReadyCondition) + v1beta2conditions.Set(machineScope.IBMVPCMachine, metav1.Condition{ + Type: infrav1.IBMVPCMachineInstanceReadyV1Beta2Condition, + Status: metav1.ConditionTrue, + Reason: infrav1.IBMVPCMachineInstanceReadyV1Beta2Reason, + }) return ctrl.Result{}, nil } -func (r *IBMVPCMachineReconciler) getOrCreate(scope *scope.MachineScope) (*vpcv1.Instance, error) { - instance, err := scope.CreateMachine() +func (r *IBMVPCMachineReconciler) getOrCreate(ctx context.Context, scope *scope.MachineScope) (*vpcv1.Instance, error) { + instance, err := scope.CreateMachine(ctx) return instance, err } -func (r *IBMVPCMachineReconciler) reconcileDelete(scope *scope.MachineScope) (_ ctrl.Result, reterr error) { - scope.Info("Handling deleted IBMVPCMachine") +func (r *IBMVPCMachineReconciler) reconcileDelete(ctx context.Context, scope *scope.MachineScope) (_ ctrl.Result, reterr error) { + log := ctrl.LoggerFrom(ctx) + log.Info("Handling deleted IBMVPCMachine") if _, ok := scope.IBMVPCMachine.Labels[clusterv1.MachineControlPlaneNameLabel]; ok { - if err := scope.DeleteVPCLoadBalancerPoolMember(); err != nil { + if err := scope.DeleteVPCLoadBalancerPoolMember(ctx); err != nil { return ctrl.Result{}, fmt.Errorf("failed to delete loadBalancer pool member: %w", err) } } if err := scope.DeleteMachine(); err != nil { - scope.Info("error deleting IBMVPCMachine") + log.Info("Error deleting IBMVPCMachine") return ctrl.Result{}, fmt.Errorf("error deleting IBMVPCMachine %s/%s: %w", scope.IBMVPCMachine.Namespace, scope.IBMVPCMachine.Spec.Name, err) } @@ -290,3 +362,56 @@ func (r *IBMVPCMachineReconciler) reconcileDelete(scope *scope.MachineScope) (_ return ctrl.Result{}, nil } + +func patchIBMVPCMachine(ctx context.Context, patchHelper *v1beta1patch.Helper, ibmVPCMachine *infrav1.IBMVPCMachine) error { + // Before computing the ready condition, make sure that InstanceReady is always set. + // NOTE: This is required because v1beta2 conditions comply with the guideline requiring conditions to be set at the + // first reconcile. + if c := v1beta2conditions.Get(ibmVPCMachine, infrav1.IBMVPCMachineInstanceReadyV1Beta2Condition); c == nil { + if ibmVPCMachine.Status.Ready { + v1beta2conditions.Set(ibmVPCMachine, metav1.Condition{ + Type: infrav1.IBMVPCMachineInstanceReadyV1Beta2Condition, + Status: metav1.ConditionTrue, + Reason: infrav1.IBMVPCMachineInstanceReadyV1Beta2Reason, + }) + } else { + v1beta2conditions.Set(ibmVPCMachine, metav1.Condition{ + Type: infrav1.IBMVPCMachineInstanceReadyV1Beta2Condition, + Status: metav1.ConditionFalse, + Reason: infrav1.IBMVPCMachineInstanceNotReadyV1Beta2Reason, + }) + } + } + + v1beta1conditions.SetSummary(ibmVPCMachine, + v1beta1conditions.WithConditions( + infrav1.InstanceReadyCondition, + ), + ) + + if err := v1beta2conditions.SetSummaryCondition(ibmVPCMachine, ibmVPCMachine, infrav1.IBMVPCMachineReadyV1Beta2Condition, + v1beta2conditions.ForConditionTypes{ + infrav1.IBMVPCMachineInstanceReadyV1Beta2Condition, + }, + // Using a custom merge strategy to override reasons applied during merge. + v1beta2conditions.CustomMergeStrategy{ + MergeStrategy: v1beta2conditions.DefaultMergeStrategy( + // Use custom reasons.
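+ // GetDefaultComputeMergeReasonFunc maps the merged status onto the IBMVPCMachine-specific Ready/NotReady/ReadyUnknown reasons declared in conditions_consts.go, instead of the generic reasons the default merge strategy would otherwise report.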
+ v1beta2conditions.ComputeReasonFunc(v1beta2conditions.GetDefaultComputeMergeReasonFunc( + infrav1.IBMVPCMachineNotReadyV1Beta2Reason, + infrav1.IBMVPCMachineReadyUnknownV1Beta2Reason, + infrav1.IBMVPCMachineReadyV1Beta2Reason, + )), + ), + }, + ); err != nil { + return fmt.Errorf("failed to set %s condition: %w", infrav1.IBMVPCMachineReadyV1Beta2Condition, err) + } + + // Patch the IBMVPCMachine resource. + return patchHelper.Patch(ctx, ibmVPCMachine, v1beta1patch.WithOwnedV1Beta2Conditions{Conditions: []string{ + infrav1.IBMVPCMachineReadyV1Beta2Condition, + infrav1.IBMVPCMachineInstanceReadyV1Beta2Condition, + clusterv1beta1.PausedV1Beta2Condition, + }}) +} diff --git a/controllers/ibmvpcmachine_controller_test.go b/controllers/ibmvpcmachine_controller_test.go index 0f33b3e83..8d0e4dea6 100644 --- a/controllers/ibmvpcmachine_controller_test.go +++ b/controllers/ibmvpcmachine_controller_test.go @@ -30,8 +30,10 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/klog/v2" "k8s.io/utils/ptr" + "sigs.k8s.io/cluster-api/api/core/v1beta1" //nolint:staticcheck clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/cluster-api/util" + v1beta2conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions/v1beta2" //nolint:staticcheck ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" @@ -228,7 +230,6 @@ func TestIBMVPCMachineReconciler_reconcile(t *testing.T) { Log: klog.Background(), } machineScope = &scope.MachineScope{ - Logger: klog.Background(), IBMVPCMachine: &infrav1.IBMVPCMachine{ ObjectMeta: metav1.ObjectMeta{ Name: "capi-machine", @@ -256,7 +257,7 @@ func TestIBMVPCMachineReconciler_reconcile(t *testing.T) { g := NewWithT(t) setup(t) t.Cleanup(teardown) - _, err := reconciler.reconcileNormal(machineScope) + _, err := reconciler.reconcileNormal(ctx, machineScope) g.Expect(err).To(BeNil()) g.Expect(machineScope.IBMVPCMachine.Finalizers).To(ContainElement(infrav1.MachineFinalizer)) }) @@ -270,7 +271,7 @@ func TestIBMVPCMachineReconciler_reconcile(t *testing.T) { machineScope.Machine.Spec.Bootstrap.DataSecretName = ptr.To("capi-machine") machineScope.IBMVPCCluster.Status.Subnet.ID = ptr.To("capi-subnet-id") mockvpc.EXPECT().ListInstances(options).Return(instancelist, response, errors.New("Failed to create or fetch instance")) - _, err := reconciler.reconcileNormal(machineScope) + _, err := reconciler.reconcileNormal(ctx, machineScope) g.Expect(err).To(Not(BeNil())) g.Expect(machineScope.IBMVPCMachine.Finalizers).To(ContainElement(infrav1.MachineFinalizer)) }) @@ -287,7 +288,6 @@ func TestIBMVPCMachineLBReconciler_reconcile(t *testing.T) { Log: klog.Background(), } machineScope := &scope.MachineScope{ - Logger: klog.Background(), IBMVPCMachine: &infrav1.IBMVPCMachine{ ObjectMeta: metav1.ObjectMeta{ Name: "capi-machine", @@ -386,7 +386,7 @@ func TestIBMVPCMachineLBReconciler_reconcile(t *testing.T) { mockgt.EXPECT().GetTagByName(gomock.AssignableToTypeOf("capi-cluster")).Return(existingTag, nil) mockgt.EXPECT().AttachTag(gomock.AssignableToTypeOf(&globaltaggingv1.AttachTagOptions{})).Return(nil, &core.DetailedResponse{}, nil) - _, err := reconciler.reconcileNormal(machineScope) + _, err := reconciler.reconcileNormal(ctx, machineScope) g.Expect(err).To((Not(BeNil()))) g.Expect(machineScope.IBMVPCMachine.Finalizers).To(ContainElement(infrav1.MachineFinalizer)) }) @@ -401,7 +401,7 @@ func TestIBMVPCMachineLBReconciler_reconcile(t *testing.T) { 
mockvpc.EXPECT().GetLoadBalancer(gomock.AssignableToTypeOf(&vpcv1.GetLoadBalancerOptions{})).Return(loadBalancer, &core.DetailedResponse{}, nil) mockvpc.EXPECT().ListLoadBalancerPoolMembers(gomock.AssignableToTypeOf(&vpcv1.ListLoadBalancerPoolMembersOptions{})).Return(&vpcv1.LoadBalancerPoolMemberCollection{}, &core.DetailedResponse{}, errors.New("failed to list loadBalancerPoolMembers")) - _, err := reconciler.reconcileNormal(machineScope) + _, err := reconciler.reconcileNormal(ctx, machineScope) g.Expect(err).To(Not(BeNil())) g.Expect(machineScope.IBMVPCMachine.Finalizers).To(ContainElement(infrav1.MachineFinalizer)) }) @@ -421,7 +421,7 @@ func TestIBMVPCMachineLBReconciler_reconcile(t *testing.T) { mockvpc.EXPECT().ListLoadBalancerPoolMembers(gomock.AssignableToTypeOf(&vpcv1.ListLoadBalancerPoolMembersOptions{})).Return(&vpcv1.LoadBalancerPoolMemberCollection{}, &core.DetailedResponse{}, nil) mockvpc.EXPECT().CreateLoadBalancerPoolMember(gomock.AssignableToTypeOf(&vpcv1.CreateLoadBalancerPoolMemberOptions{})).Return(customloadBalancerPoolMember, &core.DetailedResponse{}, nil) - result, err := reconciler.reconcileNormal(machineScope) + result, err := reconciler.reconcileNormal(ctx, machineScope) // Requeue should be set when the Pool Member is found, but not yet ready (active). g.Expect(result.RequeueAfter).To(Not(BeZero())) g.Expect(err).To(BeNil()) @@ -445,7 +445,7 @@ func TestIBMVPCMachineLBReconciler_reconcile(t *testing.T) { mockvpc.EXPECT().ListLoadBalancerPoolMembers(gomock.AssignableToTypeOf(&vpcv1.ListLoadBalancerPoolMembersOptions{})).Return(&vpcv1.LoadBalancerPoolMemberCollection{}, &core.DetailedResponse{}, nil) mockvpc.EXPECT().CreateLoadBalancerPoolMember(gomock.AssignableToTypeOf(&vpcv1.CreateLoadBalancerPoolMemberOptions{})).Return(loadBalancerPoolMember, &core.DetailedResponse{}, nil) - _, err := reconciler.reconcileNormal(machineScope) + _, err := reconciler.reconcileNormal(ctx, machineScope) g.Expect(err).To(BeNil()) g.Expect(machineScope.IBMVPCMachine.Finalizers).To(ContainElement(infrav1.MachineFinalizer)) g.Expect(machineScope.IBMVPCMachine.Status.Ready).To(Equal(true)) @@ -487,7 +487,7 @@ func TestIBMVPCMachineLBReconciler_reconcile(t *testing.T) { } mockvpc.EXPECT().ListInstances(gomock.AssignableToTypeOf(&vpcv1.ListInstancesOptions{})).Return(customInstancelist, &core.DetailedResponse{}, nil) - result, err := reconciler.reconcileNormal(machineScope) + result, err := reconciler.reconcileNormal(ctx, machineScope) g.Expect(err).To(BeNil()) g.Expect(result.RequeueAfter).To(Not(BeZero())) g.Expect(machineScope.IBMVPCMachine.Status.Ready).To(Equal(false)) @@ -512,7 +512,7 @@ func TestIBMVPCMachineLBReconciler_reconcile(t *testing.T) { } mockvpc.EXPECT().ListInstances(gomock.AssignableToTypeOf(&vpcv1.ListInstancesOptions{})).Return(customInstancelist, &core.DetailedResponse{}, nil) - _, err := reconciler.reconcileNormal(machineScope) + _, err := reconciler.reconcileNormal(ctx, machineScope) g.Expect(err).To(BeNil()) g.Expect(machineScope.IBMVPCMachine.Status.Ready).To(Equal(true)) }) @@ -536,7 +536,7 @@ func TestIBMVPCMachineLBReconciler_reconcile(t *testing.T) { } mockvpc.EXPECT().ListInstances(gomock.AssignableToTypeOf(&vpcv1.ListInstancesOptions{})).Return(customInstancelist, &core.DetailedResponse{}, nil) - result, err := reconciler.reconcileNormal(machineScope) + result, err := reconciler.reconcileNormal(ctx, machineScope) g.Expect(err).To(BeNil()) g.Expect(result.RequeueAfter).To(Not(BeZero())) g.Expect(machineScope.IBMVPCMachine.Status.Ready).To(Equal(false)) @@ 
-561,7 +561,7 @@ func TestIBMVPCMachineLBReconciler_reconcile(t *testing.T) { } mockvpc.EXPECT().ListInstances(gomock.AssignableToTypeOf(&vpcv1.ListInstancesOptions{})).Return(customInstancelist, &core.DetailedResponse{}, nil) - result, err := reconciler.reconcileNormal(machineScope) + result, err := reconciler.reconcileNormal(ctx, machineScope) g.Expect(err).To(BeNil()) g.Expect(result.RequeueAfter).To(BeZero()) g.Expect(machineScope.IBMVPCMachine.Status.Ready).To(Equal(false)) @@ -587,7 +587,6 @@ func TestIBMVPCMachineReconciler_Delete(t *testing.T) { Log: klog.Background(), } machineScope = &scope.MachineScope{ - Logger: klog.Background(), IBMVPCMachine: &infrav1.IBMVPCMachine{ ObjectMeta: metav1.ObjectMeta{ Name: "capi-machine", @@ -611,7 +610,7 @@ func TestIBMVPCMachineReconciler_Delete(t *testing.T) { setup(t) t.Cleanup(teardown) mockvpc.EXPECT().DeleteInstance(gomock.AssignableToTypeOf(options)).Return(nil, errors.New("Failed to delete the VPC instance")) - _, err := reconciler.reconcileDelete(machineScope) + _, err := reconciler.reconcileDelete(ctx, machineScope) g.Expect(err).To(Not(BeNil())) g.Expect(machineScope.IBMVPCMachine.Finalizers).To(ContainElement(infrav1.MachineFinalizer)) }) @@ -621,7 +620,7 @@ func TestIBMVPCMachineReconciler_Delete(t *testing.T) { t.Cleanup(teardown) response := &core.DetailedResponse{} mockvpc.EXPECT().DeleteInstance(gomock.AssignableToTypeOf(options)).Return(response, nil) - _, err := reconciler.reconcileDelete(machineScope) + _, err := reconciler.reconcileDelete(ctx, machineScope) g.Expect(err).To(BeNil()) g.Expect(machineScope.IBMVPCMachine.Finalizers).To(Not(ContainElement(infrav1.MachineFinalizer))) }) @@ -637,7 +636,6 @@ func TestIBMVPCMachineLBReconciler_Delete(t *testing.T) { Log: klog.Background(), } machineScope := &scope.MachineScope{ - Logger: klog.Background(), IBMVPCMachine: &infrav1.IBMVPCMachine{ ObjectMeta: metav1.ObjectMeta{ Name: "capi-machine", @@ -685,7 +683,7 @@ func TestIBMVPCMachineLBReconciler_Delete(t *testing.T) { mockvpc.EXPECT().GetLoadBalancer(gomock.AssignableToTypeOf(&vpcv1.GetLoadBalancerOptions{})).Return(loadBalancer, &core.DetailedResponse{}, nil) mockvpc.EXPECT().GetInstance(gomock.AssignableToTypeOf(&vpcv1.GetInstanceOptions{})).Return(&vpcv1.Instance{}, &core.DetailedResponse{}, nil) mockvpc.EXPECT().ListLoadBalancerPoolMembers(gomock.AssignableToTypeOf(&vpcv1.ListLoadBalancerPoolMembersOptions{})).Return(&vpcv1.LoadBalancerPoolMemberCollection{}, &core.DetailedResponse{}, errors.New("failed to list LoadBalancerPoolMembers")) - _, err := reconciler.reconcileDelete(machineScope) + _, err := reconciler.reconcileDelete(ctx, machineScope) g.Expect(err).To((Not(BeNil()))) g.Expect(machineScope.IBMVPCMachine.Finalizers).To(ContainElement(infrav1.MachineFinalizer)) }) @@ -697,9 +695,139 @@ func TestIBMVPCMachineLBReconciler_Delete(t *testing.T) { mockvpc.EXPECT().GetInstance(gomock.AssignableToTypeOf(&vpcv1.GetInstanceOptions{})).Return(&vpcv1.Instance{}, &core.DetailedResponse{}, nil) mockvpc.EXPECT().ListLoadBalancerPoolMembers(gomock.AssignableToTypeOf(&vpcv1.ListLoadBalancerPoolMembersOptions{})).Return(&vpcv1.LoadBalancerPoolMemberCollection{}, &core.DetailedResponse{}, nil) mockvpc.EXPECT().DeleteInstance(gomock.AssignableToTypeOf(&vpcv1.DeleteInstanceOptions{})).Return(&core.DetailedResponse{}, nil) - _, err := reconciler.reconcileDelete(machineScope) + _, err := reconciler.reconcileDelete(ctx, machineScope) g.Expect(err).To(BeNil()) 
g.Expect(machineScope.IBMVPCMachine.Finalizers).To(Not(ContainElement(infrav1.MachineFinalizer))) }) }) } + +func TestIBMVPCMachine_Reconcile_Conditions(t *testing.T) { + testCases := []struct { + name string + vpcMachine *infrav1.IBMVPCMachine + ownerMachine *clusterv1.Machine + vpcCluster *infrav1.IBMVPCCluster + ownerCluster *clusterv1.Cluster + expectedCondition metav1.Condition + expectError bool + }{ + { + name: "Should set conditions on first reconcile", + vpcMachine: &infrav1.IBMVPCMachine{ + ObjectMeta: metav1.ObjectMeta{ + Name: "vpc-machine", Labels: map[string]string{ + clusterv1.ClusterNameLabel: "capi-cluster"}, + Finalizers: []string{infrav1.MachineFinalizer}, + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: clusterv1.GroupVersion.String(), + Kind: "Machine", + Name: "capi-test-machine", + UID: "1", + }, + { + APIVersion: clusterv1.GroupVersion.String(), + Kind: "Cluster", + Name: "capi-cluster", + UID: "1", + }, + }, + }, + Spec: infrav1.IBMVPCMachineSpec{ + Image: &infrav1.IBMVPCResourceReference{}, + }, + }, + vpcCluster: &infrav1.IBMVPCCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "capi-cluster"}, + Spec: infrav1.IBMVPCClusterSpec{ + ControlPlaneEndpoint: v1beta1.APIEndpoint{ + Host: "cluster-host", + }, + }, + }, + ownerMachine: &clusterv1.Machine{ + ObjectMeta: metav1.ObjectMeta{ + Name: "capi-test-machine"}}, + ownerCluster: &clusterv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "capi-cluster"}, + Spec: clusterv1.ClusterSpec{ + InfrastructureRef: clusterv1.ContractVersionedObjectReference{ + Name: "capi-cluster"}}}, + expectedCondition: metav1.Condition{ + Type: infrav1.IBMVPCMachineInstanceReadyV1Beta2Condition, + Status: metav1.ConditionFalse, + Reason: infrav1.IBMVPCMachineInstanceNotReadyV1Beta2Reason, + }, + expectError: false, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + g := NewWithT(t) + reconciler := &IBMVPCMachineReconciler{ + Client: testEnv.Client, + Log: klog.Background(), + } + ns, err := testEnv.CreateNamespace(ctx, fmt.Sprintf("namespace-%s", util.RandomString(5))) + g.Expect(err).To(BeNil()) + defer func() { + g.Expect(testEnv.Cleanup(ctx, ns)).To(Succeed()) + }() + + createObject(g, tc.ownerCluster, ns.Name) + defer cleanupObject(g, tc.ownerCluster) + + createObject(g, tc.vpcCluster, ns.Name) + defer cleanupObject(g, tc.vpcCluster) + + createObject(g, tc.ownerMachine, ns.Name) + defer cleanupObject(g, tc.ownerMachine) + + createObject(g, tc.vpcMachine, ns.Name) + defer cleanupObject(g, tc.vpcMachine) + + g.Eventually(func() bool { + machine := &infrav1.IBMVPCMachine{} + key := client.ObjectKey{ + Name: tc.vpcMachine.Name, + Namespace: ns.Name, + } + err = testEnv.Get(ctx, key, machine) + return err == nil + }, 10*time.Second).Should(Equal(true)) + + _, err = reconciler.Reconcile(ctx, ctrl.Request{ + NamespacedName: client.ObjectKey{ + Namespace: tc.vpcMachine.Namespace, + Name: tc.vpcMachine.Name, + }, + }) + + if tc.expectError { + g.Expect(err).ToNot(BeNil()) + } else { + g.Expect(err).To(BeNil()) + } + + machine := &infrav1.IBMVPCMachine{} + key := client.ObjectKey{ + Name: tc.vpcMachine.Name, + Namespace: ns.Name, + } + + err = testEnv.Get(ctx, key, machine) + g.Expect(err).To(BeNil()) + g.Expect(len(machine.Status.V1Beta2.Conditions)).To(BeNumerically(">", 0)) + + instanceReadyCondition := v1beta2conditions.Get(machine, infrav1.IBMVPCMachineInstanceReadyV1Beta2Condition) + g.Expect(instanceReadyCondition).To(Not(BeNil())) +
g.Expect(instanceReadyCondition.Type).To(Equal(tc.expectedCondition.Type)) + g.Expect(instanceReadyCondition.Status).To(Equal(tc.expectedCondition.Status)) + g.Expect(instanceReadyCondition.Reason).To(Equal(tc.expectedCondition.Reason)) + }) + } +}
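For reviewers who want to inspect the result of this change on a live management cluster: once the controller has patched status, the mirrored conditions live under status.v1beta2.conditions and can be read back through the GetV1Beta2Conditions accessor added in ibmvpcmachine_types.go. The following is a minimal sketch; the kubeconfig wiring, the default/capi-machine object key, and the standalone-program framing are illustrative assumptions, not part of this diff.

package main

import (
	"context"
	"fmt"

	"k8s.io/apimachinery/pkg/api/meta"
	clientgoscheme "k8s.io/client-go/kubernetes/scheme"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"

	infrav1 "sigs.k8s.io/cluster-api-provider-ibmcloud/api/v1beta2"
)

func main() {
	// Register the provider types alongside the built-in ones.
	_ = infrav1.AddToScheme(clientgoscheme.Scheme)

	cl, err := client.New(ctrl.GetConfigOrDie(), client.Options{Scheme: clientgoscheme.Scheme})
	if err != nil {
		panic(err)
	}

	// Illustrative object key; point this at a machine that actually exists.
	machine := &infrav1.IBMVPCMachine{}
	if err := cl.Get(context.Background(), client.ObjectKey{Namespace: "default", Name: "capi-machine"}, machine); err != nil {
		panic(err)
	}

	// GetV1Beta2Conditions returns nil until the controller has patched status.v1beta2.
	conditions := machine.GetV1Beta2Conditions()
	for _, condType := range []string{
		infrav1.IBMVPCMachineReadyV1Beta2Condition,
		infrav1.IBMVPCMachineInstanceReadyV1Beta2Condition,
	} {
		if c := meta.FindStatusCondition(conditions, condType); c != nil {
			fmt.Printf("%s: status=%s reason=%s message=%q\n", c.Type, c.Status, c.Reason, c.Message)
		}
	}
}

With a healthy instance both entries should report status True with the Ready and InstanceReady reasons; patchIBMVPCMachine above owns the summary Ready condition, so it is set on every reconcile even before the instance exists.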