
Commit 1904cab

🌱 fix linting (#1503)

Fix linting errors/warnings caused by the update of the `govet` linter.

Signed-off-by: Dhairya Arora <[email protected]>

1 parent c7f50a3 commit 1904cab

File tree: 13 files changed, +79 −15 lines
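The diffs below all address the same two findings. First, the printf check in newer `go vet` releases reports non-constant strings passed as the format argument of a printf-style function; cluster-api's `conditions.MarkFalse` takes a printf-style message format plus arguments, so each call that forwarded a dynamic message now passes an explicit "%s" format, or declares the message as a `const`. Second, the `int`-to-`int32` port casts are annotated with `//nolint:gosec`. A minimal sketch of the vet finding, not taken from this repository:

package main

import "fmt"

func main() {
	msg := "host not ready: 90% provisioned" // dynamic text may contain '%'

	// Flagged by the go vet printf check ("non-constant format string"):
	// the '%' inside msg is misread as a formatting verb at run time.
	fmt.Printf(msg)

	// Fix applied throughout this commit: pass the message through an
	// explicit "%s" format so it is printed verbatim.
	fmt.Printf("%s\n", msg)

	// Alternative fix, also used here: make the string a constant, which
	// vet can then inspect for stray verbs.
	const ready = "enabled LoadBalancer but load balancer not ready yet"
	fmt.Printf(ready + "\n")
}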

README.md
Lines changed: 2 additions & 2 deletions

@@ -3,7 +3,7 @@
 <br>

 <div align="center">
-<a href="https://syself.com/docs/caph/getting-started/quickstart">Quickstart</a> |
+<a href="https://syself.com/docs/caph/getting-started/quickstart/prerequisites">Quickstart</a> |
 <a href="https://syself.com/docs/caph/getting-started/introduction">Docs</a> |
 <a href="https://cluster-api.sigs.k8s.io/">Cluster API Book</a><br><br>
 <p>⭐ Consider leaving a star — it motivates us a lot! ⭐</p>
@@ -62,7 +62,7 @@ The best way to get started with CAPH is to spin up a cluster. For that you can

 Additional resources from the documentation:

-- [**Cluster API Provider Hetzner 15 Minute Tutorial**](https://syself.com/docs/caph/getting-started/quickstart): Set up a bootstrap cluster using Kind and deploy a Kubernetes cluster on Hetzner.
+- [**Cluster API Provider Hetzner 15 Minute Tutorial**](https://syself.com/docs/caph/getting-started/quickstart/prerequisites): Set up a bootstrap cluster using Kind and deploy a Kubernetes cluster on Hetzner.
 - [**Develop and test Kubernetes clusters with Tilt**](https://syself.com/docs/caph/developers/development-guide): Start using Tilt for rapid testing of various cluster flavors, like with/without a private network or bare metal.
 - [**Develop and test your own node-images**](https://syself.com/docs/caph/topics/node-image): Learn how to use your own machine images for production systems.

controllers/hetznerbaremetalhost_controller.go
Lines changed: 1 addition & 0 deletions

@@ -274,6 +274,7 @@ func (r *HetznerBareMetalHostReconciler) getSecrets(
             infrav1.CredentialsAvailableCondition,
             infrav1.OSSSHSecretMissingReason,
             clusterv1.ConditionSeverityError,
+            "%s",
             msg,
         )
         record.Warnf(bmHost, infrav1.OSSSHSecretMissingReason, msg)

controllers/hetznercluster_controller.go
Lines changed: 6 additions & 3 deletions

@@ -234,6 +234,7 @@ func (r *HetznerClusterReconciler) reconcileNormal(ctx context.Context, clusterS
             infrav1.TargetClusterSecretReadyCondition,
             infrav1.TargetSecretSyncFailedReason,
             clusterv1.ConditionSeverityError,
+            "%s",
             reterr.Error(),
         )
         return reconcile.Result{}, reterr
@@ -252,7 +253,7 @@ func processControlPlaneEndpoint(hetznerCluster *infrav1.HetznerCluster) {
     if hetznerCluster.Spec.ControlPlaneLoadBalancer.Enabled {
         if hetznerCluster.Status.ControlPlaneLoadBalancer.IPv4 != "<nil>" {
             defaultHost := hetznerCluster.Status.ControlPlaneLoadBalancer.IPv4
-            defaultPort := int32(hetznerCluster.Spec.ControlPlaneLoadBalancer.Port)
+            defaultPort := int32(hetznerCluster.Spec.ControlPlaneLoadBalancer.Port) //nolint:gosec // Validation for the port range (1 to 65535) is already done via kubebuilder.

             if hetznerCluster.Spec.ControlPlaneEndpoint == nil {
                 hetznerCluster.Spec.ControlPlaneEndpoint = &clusterv1.APIEndpoint{
@@ -270,7 +271,7 @@ func processControlPlaneEndpoint(hetznerCluster *infrav1.HetznerCluster) {
             conditions.MarkTrue(hetznerCluster, infrav1.ControlPlaneEndpointSetCondition)
             hetznerCluster.Status.Ready = true
         } else {
-            msg := "enabled LoadBalancer but load balancer not ready yet"
+            const msg = "enabled LoadBalancer but load balancer not ready yet"
             conditions.MarkFalse(hetznerCluster,
                 infrav1.ControlPlaneEndpointSetCondition,
                 infrav1.ControlPlaneEndpointNotSetReason,
@@ -283,7 +284,7 @@ func processControlPlaneEndpoint(hetznerCluster *infrav1.HetznerCluster) {
             conditions.MarkTrue(hetznerCluster, infrav1.ControlPlaneEndpointSetCondition)
             hetznerCluster.Status.Ready = true
         } else {
-            msg := "disabled LoadBalancer and not yet provided ControlPlane endpoint"
+            const msg = "disabled LoadBalancer and not yet provided ControlPlane endpoint"
             conditions.MarkFalse(hetznerCluster,
                 infrav1.ControlPlaneEndpointSetCondition,
                 infrav1.ControlPlaneEndpointNotSetReason,
@@ -454,6 +455,7 @@ func hcloudTokenErrorResult(
         conditionType,
         infrav1.HCloudCredentialsInvalidReason,
         clusterv1.ConditionSeverityError,
+        "%s",
         err.Error(),
     )
     return reconcile.Result{}, fmt.Errorf("an unhandled failure occurred with the Hetzner secret: %w", err)
@@ -575,6 +577,7 @@ func (r *HetznerClusterReconciler) reconcileTargetClusterManager(ctx context.Con
             infrav1.TargetClusterReadyCondition,
             infrav1.TargetClusterCreateFailedReason,
             clusterv1.ConditionSeverityError,
+            "%s",
             err.Error(),
         )
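The `//nolint:gosec` directives in this file silence gosec's integer-conversion check (G115, a golangci-lint finding distinct from `go vet`), which flags `int`-to-`int32` casts as potential overflows. The justification given is that kubebuilder validation already restricts the port to 1 to 65535, so the cast cannot overflow. Where no such validation exists, a checked conversion is the usual alternative; a hypothetical sketch (helper name and error text are illustrative, not from this commit):

package main

import (
	"fmt"
	"math"
)

// int32FromPort is a hypothetical checked conversion that makes the
// overflow case explicit instead of suppressing the linter.
func int32FromPort(port int) (int32, error) {
	if port < 1 || port > math.MaxInt32 {
		return 0, fmt.Errorf("port %d out of range", port)
	}
	return int32(port), nil
}

func main() {
	p, err := int32FromPort(6443)
	if err != nil {
		panic(err)
	}
	fmt.Println(p) // 6443
}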

controllers/hetznercluster_controller_test.go
Lines changed: 6 additions & 6 deletions

@@ -1283,8 +1283,8 @@ func TestSetControlPlaneEndpoint(t *testing.T) {
         t.Fatalf("Wrong value for Host set. Got: %s, Want: %s", hetznerCluster.Spec.ControlPlaneEndpoint.Host, hetznerCluster.Status.ControlPlaneLoadBalancer.IPv4)
     }

-    if hetznerCluster.Spec.ControlPlaneEndpoint.Port != int32(hetznerCluster.Spec.ControlPlaneLoadBalancer.Port) {
-        t.Fatalf("Wrong value for Port set. Got: %d, Want: %d", hetznerCluster.Spec.ControlPlaneEndpoint.Port, int32(hetznerCluster.Spec.ControlPlaneLoadBalancer.Port))
+    if hetznerCluster.Spec.ControlPlaneEndpoint.Port != int32(hetznerCluster.Spec.ControlPlaneLoadBalancer.Port) { //nolint:gosec // Validation for the port range (1 to 65535) is already done via kubebuilder.
+        t.Fatalf("Wrong value for Port set. Got: %d, Want: %d", hetznerCluster.Spec.ControlPlaneEndpoint.Port, int32(hetznerCluster.Spec.ControlPlaneLoadBalancer.Port)) //nolint:gosec // Validation for the port range (1 to 65535) is already done via kubebuilder.
     }

     if hetznerCluster.Status.Ready != true {
@@ -1317,8 +1317,8 @@ func TestSetControlPlaneEndpoint(t *testing.T) {
         t.Fatalf("Wrong value for Host set. Got: %s, Want: %s", hetznerCluster.Spec.ControlPlaneEndpoint.Host, hetznerCluster.Status.ControlPlaneLoadBalancer.IPv4)
     }

-    if hetznerCluster.Spec.ControlPlaneEndpoint.Port != int32(hetznerCluster.Spec.ControlPlaneLoadBalancer.Port) {
-        t.Fatalf("Wrong value for Port set. Got: %d, Want: %d", hetznerCluster.Spec.ControlPlaneEndpoint.Port, int32(hetznerCluster.Spec.ControlPlaneLoadBalancer.Port))
+    if hetznerCluster.Spec.ControlPlaneEndpoint.Port != int32(hetznerCluster.Spec.ControlPlaneLoadBalancer.Port) { //nolint:gosec // Validation for the port range (1 to 65535) is already done via kubebuilder.
+        t.Fatalf("Wrong value for Port set. Got: %d, Want: %d", hetznerCluster.Spec.ControlPlaneEndpoint.Port, int32(hetznerCluster.Spec.ControlPlaneLoadBalancer.Port)) //nolint:gosec // Validation for the port range (1 to 65535) is already done via kubebuilder.
     }

     if hetznerCluster.Status.Ready != true {
@@ -1351,8 +1351,8 @@ func TestSetControlPlaneEndpoint(t *testing.T) {
         t.Fatalf("Wrong value for Host set. Got: %s, Want: 'xyz'", hetznerCluster.Spec.ControlPlaneEndpoint.Host)
     }

-    if hetznerCluster.Spec.ControlPlaneEndpoint.Port != int32(hetznerCluster.Spec.ControlPlaneLoadBalancer.Port) {
-        t.Fatalf("Wrong value for Port set. Got: %d, Want: %d", hetznerCluster.Spec.ControlPlaneEndpoint.Port, int32(hetznerCluster.Spec.ControlPlaneLoadBalancer.Port))
+    if hetznerCluster.Spec.ControlPlaneEndpoint.Port != int32(hetznerCluster.Spec.ControlPlaneLoadBalancer.Port) { //nolint:gosec // Validation for the port range (1 to 65535) is already done via kubebuilder.
+        t.Fatalf("Wrong value for Port set. Got: %d, Want: %d", hetznerCluster.Spec.ControlPlaneEndpoint.Port, int32(hetznerCluster.Spec.ControlPlaneLoadBalancer.Port)) //nolint:gosec // Validation for the port range (1 to 65535) is already done via kubebuilder.
     }

     if hetznerCluster.Status.Ready != true {

pkg/scope/cluster.go
Lines changed: 1 addition & 1 deletion

@@ -143,7 +143,7 @@ func (s *ClusterScope) SetStatusFailureDomain(regions []infrav1.Region) {

 // ControlPlaneAPIEndpointPort returns the Port of the Kube-api server.
 func (s *ClusterScope) ControlPlaneAPIEndpointPort() int32 {
-    return int32(s.HetznerCluster.Spec.ControlPlaneLoadBalancer.Port)
+    return int32(s.HetznerCluster.Spec.ControlPlaneLoadBalancer.Port) //nolint:gosec // Validation for the port range (1 to 65535) is already done via kubebuilder.
 }

 // ClientConfig return a kubernetes client config for the cluster context.
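The recurring nolint justification points at kubebuilder validation markers on the load balancer's Port field. Such a declaration looks roughly like the sketch below (field and type names are assumed for illustration; the authoritative definition lives in this repository's API types). The markers are compiled into the CRD's OpenAPI schema, so the apiserver rejects out-of-range ports before any controller code runs:

package api

// LoadBalancerSpec sketches a kubebuilder-validated port field; the real
// type in this repository may differ.
type LoadBalancerSpec struct {
	// Port of the load balancer for the Kubernetes API server.
	// The markers below restrict accepted values to 1-65535, which is why
	// the int32 casts in this commit cannot overflow in practice.
	// +kubebuilder:validation:Minimum=1
	// +kubebuilder:validation:Maximum=65535
	Port int `json:"port"`
}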

pkg/services/baremetal/baremetal/baremetal.go
Lines changed: 10 additions & 1 deletion

@@ -215,7 +215,14 @@ func (s *Service) update(ctx context.Context) error {
     if readyCondition.Status == corev1.ConditionTrue {
         conditions.MarkTrue(s.scope.BareMetalMachine, infrav1.HostReadyCondition)
     } else if readyCondition.Status == corev1.ConditionFalse {
-        conditions.MarkFalse(s.scope.BareMetalMachine, infrav1.HostReadyCondition, readyCondition.Reason, readyCondition.Severity, readyCondition.Message)
+        conditions.MarkFalse(
+            s.scope.BareMetalMachine,
+            infrav1.HostReadyCondition,
+            readyCondition.Reason,
+            readyCondition.Severity,
+            "%s",
+            readyCondition.Message,
+        )
     }
 }

@@ -297,6 +304,7 @@ func (s *Service) associate(ctx context.Context) error {
             infrav1.HostAssociateSucceededCondition,
             infrav1.NoAvailableHostReason,
             clusterv1.ConditionSeverityWarning,
+            "%s",
             fmt.Sprintf("no available host (%s)", reason),
         )
         return &scope.RequeueAfterError{RequeueAfter: requeueAfter}
@@ -318,6 +326,7 @@ func (s *Service) associate(ctx context.Context) error {
             infrav1.HostAssociateSucceededCondition,
             infrav1.HostAssociateFailedReason,
             clusterv1.ConditionSeverityWarning,
+            "%s",
             reterr.Error(),
         )
         return reterr

pkg/services/baremetal/host/host.go
Lines changed: 17 additions & 0 deletions

@@ -182,6 +182,7 @@ func (s *Service) actionPreparing(_ context.Context) actionResult {
             infrav1.ProvisionSucceededCondition,
             infrav1.ServerNotFoundReason,
             clusterv1.ConditionSeverityError,
+            "%s",
             msg,
         )
         record.Warnf(s.scope.HetznerBareMetalHost, infrav1.ServerNotFoundReason, msg)
@@ -224,6 +225,7 @@ func (s *Service) actionPreparing(_ context.Context) actionResult {
             infrav1.ProvisionSucceededCondition,
             infrav1.RescueSystemUnavailableReason,
             clusterv1.ConditionSeverityError,
+            "%s",
             errMsg,
         )
         record.Warnf(s.scope.HetznerBareMetalHost, "NoRescueSystemAvailable", errMsg)
@@ -336,6 +338,7 @@ func (s *Service) ensureSSHKey(sshSecretRef infrav1.SSHSecretRef, sshSecret *cor
             infrav1.CredentialsAvailableCondition,
             infrav1.SSHKeyAlreadyExistsReason,
             clusterv1.ConditionSeverityError,
+            "%s",
             msg,
         )
         record.Warnf(s.scope.HetznerBareMetalHost, infrav1.SSHKeyAlreadyExistsReason, msg)
@@ -367,6 +370,7 @@ func (s *Service) handleIncompleteBoot(isRebootIntoRescue, isTimeout, isConnecti
             infrav1.ProvisionSucceededCondition,
             infrav1.SSHConnectionRefusedReason,
             clusterv1.ConditionSeverityError,
+            "%s",
             msg,
         )
         record.Warnf(s.scope.HetznerBareMetalHost, "SSHConnectionError", msg)
@@ -538,6 +542,7 @@ func (s *Service) handleErrorTypeHardwareRebootFailed(isSSHTimeoutError, wantsRe
             infrav1.ProvisionSucceededCondition,
             infrav1.RebootTimedOutReason,
             clusterv1.ConditionSeverityError,
+            "%s",
             msg,
         )

@@ -653,6 +658,7 @@ func (s *Service) actionRegistering(_ context.Context) actionResult {
             infrav1.RootDeviceHintsValidatedCondition,
             infrav1.ValidationFailedReason,
             clusterv1.ConditionSeverityError,
+            "%s",
             errMsg,
         )
         return s.recordActionFailure(infrav1.RegistrationError, errMsg)
@@ -665,6 +671,7 @@ func (s *Service) actionRegistering(_ context.Context) actionResult {
             infrav1.RootDeviceHintsValidatedCondition,
             infrav1.ValidationFailedReason,
             clusterv1.ConditionSeverityError,
+            "%s",
             err.Error(),
         )
         return s.recordActionFailure(infrav1.RegistrationError, err.Error())
@@ -691,6 +698,7 @@ func (s *Service) actionRegistering(_ context.Context) actionResult {
             infrav1.RootDeviceHintsValidatedCondition,
             infrav1.ValidationFailedReason,
             clusterv1.ConditionSeverityError,
+            "%s",
             msg,
         )
         return s.recordActionFailure(infrav1.FatalError, msg)
@@ -1112,6 +1120,7 @@ func (s *Service) actionImageInstallingStartBackgroundProcess(ctx context.Contex
             infrav1.ProvisionSucceededCondition,
             infrav1.CheckDiskFailedReason,
             clusterv1.ConditionSeverityError,
+            "%s",
             msg,
         )
         record.Warn(s.scope.HetznerBareMetalHost, infrav1.CheckDiskFailedReason, msg)
@@ -1143,6 +1152,7 @@ func (s *Service) actionImageInstallingStartBackgroundProcess(ctx context.Contex
             infrav1.ProvisionSucceededCondition,
             infrav1.WipeDiskFailedReason,
             clusterv1.ConditionSeverityError,
+            "%s",
             msg,
         )
         record.Warn(s.scope.HetznerBareMetalHost, infrav1.WipeDiskFailedReason, msg)
@@ -1157,6 +1167,7 @@ func (s *Service) actionImageInstallingStartBackgroundProcess(ctx context.Contex
             infrav1.ProvisionSucceededCondition,
             infrav1.WipeDiskFailedReason,
             clusterv1.ConditionSeverityWarning,
+            "%s",
             msg,
         )
         record.Warn(s.scope.HetznerBareMetalHost, infrav1.WipeDiskFailedReason, msg)
@@ -1184,6 +1195,7 @@ func (s *Service) actionImageInstallingStartBackgroundProcess(ctx context.Contex
             infrav1.ProvisionSucceededCondition,
             infrav1.LinuxOnOtherDiskFoundReason,
             clusterv1.ConditionSeverityError,
+            "%s",
             msg,
         )
         record.Warn(s.scope.HetznerBareMetalHost, infrav1.LinuxOnOtherDiskFoundReason, msg)
@@ -1200,6 +1212,7 @@ func (s *Service) actionImageInstallingStartBackgroundProcess(ctx context.Contex
             infrav1.ProvisionSucceededCondition,
             infrav1.SSHToRescueSystemFailedReason,
             clusterv1.ConditionSeverityInfo,
+            "%s",
             msg,
         )
         record.Event(s.scope.HetznerBareMetalHost, infrav1.SSHToRescueSystemFailedReason, msg)
@@ -1331,6 +1344,7 @@ func (s *Service) createAutoSetupInput(sshClient sshclient.Client) (autoSetupInp
             infrav1.ProvisionSucceededCondition,
             infrav1.ImageSpecInvalidReason,
             clusterv1.ConditionSeverityError,
+            "%s",
             errorMessage,
         )
         return autoSetupInput{}, s.recordActionFailure(infrav1.ProvisioningError, errorMessage)
@@ -1344,6 +1358,7 @@ func (s *Service) createAutoSetupInput(sshClient sshclient.Client) (autoSetupInp
             infrav1.ProvisionSucceededCondition,
             infrav1.ImageDownloadFailedReason,
             clusterv1.ConditionSeverityError,
+            "%s",
             err.Error(),
         )
         return autoSetupInput{}, actionError{err: err}
@@ -1367,6 +1382,7 @@ func (s *Service) createAutoSetupInput(sshClient sshclient.Client) (autoSetupInp
             infrav1.ProvisionSucceededCondition,
             infrav1.NoStorageDeviceFoundReason,
             clusterv1.ConditionSeverityError,
+            "%s",
             msg,
         )
         return autoSetupInput{}, s.recordActionFailure(infrav1.ProvisioningError, msg)
@@ -1817,6 +1833,7 @@ func (s *Service) handleRobotRateLimitExceeded(err error, functionName string) {
             infrav1.HetznerAPIReachableCondition,
             infrav1.RateLimitExceededReason,
             clusterv1.ConditionSeverityWarning,
+            "%s",
             msg,
         )
         record.Warnf(s.scope.HetznerBareMetalHost, "RateLimitExceeded", msg)

pkg/services/baremetal/host/state_machine.go
Lines changed: 17 additions & 2 deletions

@@ -172,7 +172,15 @@ func (hsm *hostStateMachine) updateOSSSHStatusAndValidateKey(osSSHSecret *corev1
     }
     if err := validateSSHKey(osSSHSecret, hsm.host.Spec.Status.SSHSpec.SecretRef); err != nil {
         msg := fmt.Sprintf("ssh credentials are invalid: %s", err.Error())
-        conditions.MarkFalse(hsm.host, infrav1.CredentialsAvailableCondition, infrav1.SSHCredentialsInSecretInvalidReason, clusterv1.ConditionSeverityError, msg)
+        conditions.MarkFalse(
+            hsm.host,
+            infrav1.CredentialsAvailableCondition,
+            infrav1.SSHCredentialsInSecretInvalidReason,
+            clusterv1.ConditionSeverityError,
+            "%s",
+            msg,
+        )
+
         record.Warnf(hsm.host, infrav1.SSHKeyAlreadyExistsReason, msg)
         return hsm.reconciler.recordActionFailure(infrav1.PreparationError, infrav1.ErrorMessageMissingOrInvalidSecretData)
     }
@@ -202,7 +210,14 @@ func (hsm *hostStateMachine) updateRescueSSHStatusAndValidateKey(rescueSSHSecret
     }
     if err := validateSSHKey(rescueSSHSecret, hsm.reconciler.scope.HetznerCluster.Spec.SSHKeys.RobotRescueSecretRef); err != nil {
         msg := fmt.Sprintf("ssh credentials for rescue system are invalid: %s", err.Error())
-        conditions.MarkFalse(hsm.host, infrav1.CredentialsAvailableCondition, infrav1.SSHCredentialsInSecretInvalidReason, clusterv1.ConditionSeverityError, msg)
+        conditions.MarkFalse(
+            hsm.host,
+            infrav1.CredentialsAvailableCondition,
+            infrav1.SSHCredentialsInSecretInvalidReason,
+            clusterv1.ConditionSeverityError,
+            "%s",
+            msg,
+        )
         return hsm.reconciler.recordActionFailure(infrav1.PreparationError, infrav1.ErrorMessageMissingOrInvalidSecretData)
     }
     return nil

pkg/services/hcloud/loadbalancer/loadbalancer.go
Lines changed: 10 additions & 0 deletions

@@ -94,6 +94,7 @@ func (s *Service) Reconcile(ctx context.Context) (reconcile.Result, error) {
             infrav1.LoadBalancerReadyCondition,
             infrav1.LoadBalancerUpdateFailedReason,
             clusterv1.ConditionSeverityWarning,
+            "%s",
             err.Error(),
         )
         return reconcile.Result{}, fmt.Errorf("failed to reconcile load balancer properties: %w", err)
@@ -109,6 +110,7 @@ func (s *Service) Reconcile(ctx context.Context) (reconcile.Result, error) {
             infrav1.LoadBalancerReadyCondition,
             infrav1.LoadBalancerServiceSyncFailedReason,
             clusterv1.ConditionSeverityWarning,
+            "%s",
             err.Error(),
         )
         return reconcile.Result{}, fmt.Errorf("failed to reconcile services: %w", err)
@@ -137,6 +139,7 @@ func (s *Service) reconcileNetworkAttachement(ctx context.Context, lb *hcloud.Lo
             infrav1.LoadBalancerReadyCondition,
             infrav1.NetworkAttachFailedReason,
             clusterv1.ConditionSeverityWarning,
+            "%s",
             err.Error(),
         )

@@ -165,6 +168,7 @@ func (s *Service) reconcileNetworkAttachement(ctx context.Context, lb *hcloud.Lo
             infrav1.LoadBalancerReadyCondition,
             infrav1.NetworkAttachFailedReason,
             clusterv1.ConditionSeverityError,
+            "%s",
             err.Error(),
         )
         return err
@@ -293,6 +297,7 @@ func (s *Service) createLoadBalancer(ctx context.Context) (*hcloud.LoadBalancer,
             infrav1.LoadBalancerReadyCondition,
             infrav1.LoadBalancerCreateFailedReason,
             clusterv1.ConditionSeverityError,
+            "%s",
             err.Error(),
         )
         record.Warnf(s.scope.HetznerCluster, "FailedCreateLoadBalancer", err.Error())
@@ -370,6 +375,7 @@ func (s *Service) Delete(ctx context.Context) (err error) {
             infrav1.LoadBalancerReadyCondition,
             infrav1.LoadBalancerUpdateFailedReason,
             clusterv1.ConditionSeverityWarning,
+            "%s",
             err.Error(),
         )
         return err
@@ -394,6 +400,7 @@ func (s *Service) Delete(ctx context.Context) (err error) {
             infrav1.LoadBalancerReadyCondition,
             infrav1.LoadBalancerDeleteFailedReason,
             clusterv1.ConditionSeverityWarning,
+            "%s",
             err.Error(),
         )
         return err
@@ -448,6 +455,7 @@ func (s *Service) ownExistingLoadBalancer(ctx context.Context) (*hcloud.LoadBala
             infrav1.LoadBalancerReadyCondition,
             infrav1.LoadBalancerFailedToOwnReason,
             clusterv1.ConditionSeverityError,
+            "%s",
             fmt.Sprintf("load balancer %q not found", name),
         )
         return nil, ErrNoLoadBalancerAvailable
@@ -462,6 +470,7 @@ func (s *Service) ownExistingLoadBalancer(ctx context.Context) (*hcloud.LoadBala
             infrav1.LoadBalancerReadyCondition,
             infrav1.LoadBalancerFailedToOwnReason,
             clusterv1.ConditionSeverityError,
+            "%s",
             fmt.Sprintf("load balancer %q already owned with label %q", name, label),
         )
         return nil, ErrNoLoadBalancerAvailable
@@ -485,6 +494,7 @@ func (s *Service) ownExistingLoadBalancer(ctx context.Context) (*hcloud.LoadBala
             infrav1.LoadBalancerReadyCondition,
             infrav1.LoadBalancerFailedToOwnReason,
             clusterv1.ConditionSeverityError,
+            "%s",
             err.Error(),
         )
         return nil, err
