Skip to content

Commit e0b2279

Browse files
authored
Merge pull request #1621 from anastaruno/main
🌱 Migrating /pkg/cloud/services/loadbalancer/loadbalancer.go and /controllers/openstackmachine_controller.go to structured logging
2 parents f5e74ce + 6915c37 commit e0b2279

File tree

2 files changed

+12
-12
lines changed

2 files changed

+12
-12
lines changed

controllers/openstackmachine_controller.go

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -358,12 +358,12 @@ func (r *OpenStackMachineReconciler) reconcileNormal(ctx context.Context, scope
358358

359359
switch instanceStatus.State() {
360360
case infrav1.InstanceStateActive:
361-
scope.Logger().Info("Machine instance state is ACTIVE", "instance-id", instanceStatus.ID())
361+
scope.Logger().Info("Machine instance state is ACTIVE", "id", instanceStatus.ID())
362362
conditions.MarkTrue(openStackMachine, infrav1.InstanceReadyCondition)
363363
openStackMachine.Status.Ready = true
364364
case infrav1.InstanceStateError:
365365
// Error is unexpected, thus we report error and never retry
366-
scope.Logger().Info("Machine instance state is ERROR", "instance-id", instanceStatus.ID())
366+
scope.Logger().Info("Machine instance state is ERROR", "id", instanceStatus.ID())
367367
err = fmt.Errorf("instance state %q is unexpected", instanceStatus.State())
368368
openStackMachine.SetFailure(capierrors.UpdateMachineError, err)
369369
conditions.MarkFalse(openStackMachine, infrav1.InstanceReadyCondition, infrav1.InstanceStateErrorReason, clusterv1.ConditionSeverityError, "")
@@ -376,7 +376,7 @@ func (r *OpenStackMachineReconciler) reconcileNormal(ctx context.Context, scope
376376
default:
377377
// The other state is normal (for example, migrating, shutoff) but we don't want to proceed until it's ACTIVE
378378
// due to potential conflict or unexpected actions
379-
scope.Logger().Info("Waiting for instance to become ACTIVE", "instance-id", instanceStatus.ID(), "status", instanceStatus.State())
379+
scope.Logger().Info("Waiting for instance to become ACTIVE", "id", instanceStatus.ID(), "status", instanceStatus.State())
380380
conditions.MarkUnknown(openStackMachine, infrav1.InstanceReadyCondition, infrav1.InstanceNotReadyReason, "Instance state is not handled: %s", instanceStatus.State())
381381
return ctrl.Result{RequeueAfter: waitForInstanceBecomeActiveToReconcile}, nil
382382
}
@@ -409,7 +409,7 @@ func (r *OpenStackMachineReconciler) reconcileNormal(ctx context.Context, scope
409409
}
410410

411411
if fp.PortID != "" {
412-
scope.Logger().Info("Floating IP already associated to a port:", "id", fp.ID, "fixed ip", fp.FixedIP, "portID", port.ID)
412+
scope.Logger().Info("Floating IP already associated to a port", "id", fp.ID, "fixedIP", fp.FixedIP, "portID", port.ID)
413413
} else {
414414
err = networkingService.AssociateFloatingIP(openStackMachine, fp, port.ID)
415415
if err != nil {
@@ -432,7 +432,7 @@ func (r *OpenStackMachineReconciler) getOrCreate(logger logr.Logger, cluster *cl
432432

433433
if instanceStatus == nil {
434434
instanceSpec := machineToInstanceSpec(openStackCluster, machine, openStackMachine, userData)
435-
logger.Info("Machine not exist, Creating Machine", "Machine", openStackMachine.Name)
435+
logger.Info("Machine does not exist, creating Machine", "name", openStackMachine.Name)
436436
instanceStatus, err = computeService.CreateInstance(openStackMachine, openStackCluster, instanceSpec, cluster.Name, false)
437437
if err != nil {
438438
conditions.MarkFalse(openStackMachine, infrav1.InstanceReadyCondition, infrav1.InstanceCreateFailedReason, clusterv1.ConditionSeverityError, err.Error())

pkg/cloud/services/loadbalancer/loadbalancer.go

Lines changed: 7 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -169,7 +169,7 @@ func (s *Service) getOrCreateLoadBalancer(openStackCluster *infrav1.OpenStackClu
169169
return lb, nil
170170
}
171171

172-
s.scope.Logger().Info(fmt.Sprintf("Creating load balancer in subnet: %q", subnetID), "name", loadBalancerName)
172+
s.scope.Logger().Info("Creating load balancer in subnet", "subnetID", subnetID, "name", loadBalancerName)
173173

174174
lbCreateOpts := loadbalancers.CreateOpts{
175175
Name: loadBalancerName,
@@ -199,7 +199,7 @@ func (s *Service) getOrCreateListener(openStackCluster *infrav1.OpenStackCluster
199199
return listener, nil
200200
}
201201

202-
s.scope.Logger().Info("Creating load balancer listener", "name", listenerName, "lb-id", lbID)
202+
s.scope.Logger().Info("Creating load balancer listener", "name", listenerName, "loadBalancerID", lbID)
203203

204204
listenerCreateOpts := listeners.CreateOpts{
205205
Name: listenerName,
@@ -267,7 +267,7 @@ func (s *Service) getOrUpdateAllowedCIDRS(openStackCluster *infrav1.OpenStackClu
267267
listener.AllowedCIDRs = capostrings.Unique(listener.AllowedCIDRs)
268268

269269
if !reflect.DeepEqual(allowedCIDRs, listener.AllowedCIDRs) {
270-
s.scope.Logger().Info("CIDRs do not match, start to update listener", "expected CIDRs", allowedCIDRs, "load balancer existing CIDR", listener.AllowedCIDRs)
270+
s.scope.Logger().Info("CIDRs do not match, updating listener", "expectedCIDRs", allowedCIDRs, "currentCIDRs", listener.AllowedCIDRs)
271271
listenerUpdateOpts := listeners.UpdateOpts{
272272
AllowedCIDRs: &allowedCIDRs,
273273
}
@@ -316,7 +316,7 @@ func (s *Service) getOrCreatePool(openStackCluster *infrav1.OpenStackCluster, po
316316
return pool, nil
317317
}
318318

319-
s.scope.Logger().Info(fmt.Sprintf("Creating load balancer pool for listener %q", listenerID), "name", poolName, "lb-id", lbID)
319+
s.scope.Logger().Info("Creating load balancer pool for listener", "loadBalancerID", lbID, "listenerID", listenerID, "name", poolName)
320320

321321
method := pools.LBMethodRoundRobin
322322

@@ -356,7 +356,7 @@ func (s *Service) getOrCreateMonitor(openStackCluster *infrav1.OpenStackCluster,
356356
return nil
357357
}
358358

359-
s.scope.Logger().Info(fmt.Sprintf("Creating load balancer monitor for pool %q", poolID), "name", monitorName, "lb-id", lbID)
359+
s.scope.Logger().Info("Creating load balancer monitor for pool", "loadBalancerID", lbID, "name", monitorName, "poolID", poolID)
360360

361361
monitorCreateOpts := monitors.CreateOpts{
362362
Name: monitorName,
@@ -400,7 +400,7 @@ func (s *Service) ReconcileLoadBalancerMember(openStackCluster *infrav1.OpenStac
400400
}
401401

402402
loadBalancerName := getLoadBalancerName(clusterName)
403-
s.scope.Logger().Info("Reconciling load balancer member", "name", loadBalancerName)
403+
s.scope.Logger().Info("Reconciling load balancer member", "loadBalancerName", loadBalancerName)
404404

405405
lbID := openStackCluster.Status.APIServerLoadBalancer.ID
406406
portList := []int{int(openStackCluster.Spec.ControlPlaneEndpoint.Port)}
@@ -429,7 +429,7 @@ func (s *Service) ReconcileLoadBalancerMember(openStackCluster *infrav1.OpenStac
429429
continue
430430
}
431431

432-
s.scope.Logger().Info("Deleting load balancer member (because the IP of the machine changed)", "name", name)
432+
s.scope.Logger().Info("Deleting load balancer member because the IP of the machine changed", "name", name)
433433

434434
// lb member changed so let's delete it so we can create it again with the correct IP
435435
err = s.waitForLoadBalancerActive(lbID)

0 commit comments

Comments (0)