Commit 6e0e3ff

Merge pull request openshift-kni#1521 from openshift-kni/e2e-serial-klog-cleanup
AA: e2e serial klog cleanup: move to structured logging
2 parents: 09677e5 + 8851936, commit 6e0e3ff

28 files changed: +318 / -269 lines changed
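
The change is mechanical across all four test files: printf-style klog.Warningf/Infof calls become structured klog.ErrorS/InfoS calls, where the message is a constant string, the error (if any) is passed as the first argument to ErrorS, and all variable data moves into trailing key/value pairs. A minimal sketch of the before/after shape, using a hypothetical fetchCount helper and key names that are not taken from this commit:

package main

import (
	"errors"

	"k8s.io/klog/v2"
)

// fetchCount is a hypothetical helper, used only to have an error and a value to log.
func fetchCount(name string) (int, error) {
	return 0, errors.New("not found")
}

func main() {
	defer klog.Flush()

	count, err := fetchCount("example")
	if err != nil {
		// Before: printf-style formatting, error interpolated into the message.
		klog.Warningf("failed to fetch count for %q: %v", "example", err)
		// After: structured logging; error first, then a constant message and key/value pairs.
		klog.ErrorS(err, "failed to fetch count", "name", "example")
	}

	// Before: values formatted into the message string.
	klog.Infof("count for %q: %d", "example", count)
	// After: constant message, values attached as key/value pairs.
	klog.InfoS("count fetched", "name", "example", "count", count)
}

With the structured form, severity is expressed by choosing InfoS or ErrorS rather than by the Warningf name, which is why most former Warningf calls in this diff become InfoS (expected, retryable conditions) and the ones that carry a real error become ErrorS.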

test/e2e/install/install_test.go

Lines changed: 31 additions & 33 deletions
@@ -87,17 +87,17 @@ var _ = Describe("[Install] continuousIntegration", Serial, func() {
 err := wait.PollUntilContextTimeout(context.Background(), 10*time.Second, 5*time.Minute, immediate, func(ctx context.Context) (bool, error) {
 err := e2eclient.Client.Get(ctx, nname, updatedNROObj)
 if err != nil {
-klog.Warningf("failed to get the NRO resource: %v", err)
+klog.ErrorS(err, "failed to get the NRO resource")
 return false, err
 }

 cond := status.FindCondition(updatedNROObj.Status.Conditions, status.ConditionAvailable)
 if cond == nil {
-klog.Warningf("missing conditions in %v", updatedNROObj)
+klog.InfoS("missing conditions", "nroObj", updatedNROObj)
 return false, err
 }

-klog.Infof("condition for %s: %v", nname.Name, cond)
+klog.InfoS("condition", "name", nname.Name, "condition", cond)
 return cond.Status == metav1.ConditionTrue, nil
 })
 if err != nil {
@@ -117,27 +117,27 @@ var _ = Describe("[Install] continuousIntegration", Serial, func() {
 Eventually(func() bool {
 ds, err := getDaemonSetByOwnerReference(updatedNROObj.UID)
 if err != nil {
-klog.Warningf("unable to get Daemonset %v", err)
+klog.ErrorS(err, "unable to get Daemonset")
 return false
 }

 if ds.Status.NumberMisscheduled != 0 {
-klog.Warningf(" Misscheduled: There are %d nodes that should not be running Daemon pod but are", ds.Status.NumberMisscheduled)
+klog.InfoS("Misscheduled: There are nodes that should not be running Daemon pod, but they are", "count", ds.Status.NumberMisscheduled)
 return false
 }

 if ds.Status.NumberUnavailable != 0 {
-klog.Infof(" NumberUnavailable %d (should be 0)", ds.Status.NumberUnavailable)
+klog.InfoS("NumberUnavailable mismatch", "current", ds.Status.NumberUnavailable, "desired", 0)
 return false
 }

 if ds.Status.CurrentNumberScheduled != ds.Status.DesiredNumberScheduled {
-klog.Infof(" CurrentNumberScheduled %d (should be %d)", ds.Status.CurrentNumberScheduled, ds.Status.DesiredNumberScheduled)
+klog.InfoS("CurrentNumberScheduled mismatch", "current", ds.Status.CurrentNumberScheduled, "desired", ds.Status.DesiredNumberScheduled)
 return false
 }

 if ds.Status.NumberReady != ds.Status.DesiredNumberScheduled {
-klog.Infof(" NumberReady %d (should be %d)", ds.Status.CurrentNumberScheduled, ds.Status.DesiredNumberScheduled)
+klog.InfoS("NumberReady mismatch", "current", ds.Status.NumberReady, "desired", ds.Status.DesiredNumberScheduled)
 return false
 }
 return true
@@ -176,17 +176,17 @@ var _ = Describe("[Install] durability", Serial, func() {
 updatedNROObj := &nropv1.NUMAResourcesOperator{}
 err := e2eclient.Client.Get(context.TODO(), client.ObjectKeyFromObject(nroObj), updatedNROObj)
 if err != nil {
-klog.Warningf("failed to get the NUMAResourcesOperator CR: %v", err)
+klog.ErrorS(err, "failed to get the NUMAResourcesOperator CR")
 return false
 }

 cond := status.FindCondition(updatedNROObj.Status.Conditions, status.ConditionDegraded)
 if cond == nil {
-klog.Warningf("missing conditions in %v", updatedNROObj)
+klog.InfoS("missing conditions", "nroObj", updatedNROObj)
 return false
 }

-klog.Infof("condition: %v", cond)
+klog.InfoS("condition", "condition", cond)

 return cond.Status == metav1.ConditionTrue
 }).WithTimeout(5*time.Minute).WithPolling(10*time.Second).Should(BeTrue(), "NUMAResourcesOperator condition did not become degraded")
@@ -219,7 +219,7 @@ var _ = Describe("[Install] durability", Serial, func() {
 return false, err
 }
 if len(nroObj.Status.DaemonSets) != 1 {
-klog.Warningf("unsupported daemonsets (/MCP) count: %d", len(nroObj.Status.DaemonSets))
+klog.InfoS("unsupported daemonsets (/MCP)", "count", len(nroObj.Status.DaemonSets))
 return false, nil
 }
 return true, nil
@@ -253,22 +253,18 @@ var _ = Describe("[Install] durability", Serial, func() {
 updatedDs := &appsv1.DaemonSet{}
 err := e2eclient.Client.Get(context.TODO(), dsKey.AsKey(), updatedDs)
 if err != nil {
-klog.Warningf("failed to get the daemonset %s: %v", dsKey.String(), err)
+klog.ErrorS(err, "failed to get the daemonset", "key", dsKey.String())
 return false
 }

 if !nrowait.AreDaemonSetPodsReady(&updatedDs.Status) {
-klog.Warningf("daemonset %s desired %d scheduled %d ready %d",
-dsKey.String(),
-updatedDs.Status.DesiredNumberScheduled,
-updatedDs.Status.CurrentNumberScheduled,
-updatedDs.Status.NumberReady)
+klog.InfoS("daemonset not ready", "key", dsKey.String(), "desired", updatedDs.Status.DesiredNumberScheduled, "scheduled", updatedDs.Status.CurrentNumberScheduled, "ready", updatedDs.Status.NumberReady)
 return false
 }

-klog.Infof("daemonset %s ready", dsKey.String())
+klog.InfoS("daemonset ready", "key", dsKey.String())

-klog.Warningf("daemonset Generation observed %v current %v", updatedDs.Status.ObservedGeneration, ds.Generation)
+klog.InfoS("daemonset Generation", "observedGeneration", updatedDs.Status.ObservedGeneration, "currentGeneration", ds.Generation)
 isUpdated := updatedDs.Status.ObservedGeneration > ds.Generation
 if !isUpdated {
 return false
@@ -315,9 +311,9 @@ var _ = Describe("[Install] durability", Serial, func() {
 key := client.ObjectKeyFromObject(obj)
 if err := e2eclient.Client.Get(context.TODO(), key, obj); !errors.IsNotFound(err) {
 if err == nil {
-klog.Warningf("obj %s still exists", key.String())
+klog.InfoS("obj still exists", "key", key.String())
 } else {
-klog.Warningf("obj %s return with error: %v", key.String(), err)
+klog.ErrorS(err, "obj return with error", "key", key.String())
 }
 return false
 }
@@ -352,8 +348,8 @@ var _ = Describe("[Install] durability", Serial, func() {

 ds, err := getDaemonSetByOwnerReference(updatedNroObj.GetUID())
 if err != nil {
-klog.Warningf("failed to get the RTE DaemonSet: %v", err)
-klog.Warningf("NRO:\n%s\n", objects.ToYAML(updatedNroObj))
+// TODO: multi-line value in structured log
+klog.ErrorS(err, "failed to get the RTE DaemonSet", "nroYAML", objects.ToYAML(updatedNroObj))
 return false
 }

@@ -369,12 +365,13 @@ var _ = Describe("[Install] durability", Serial, func() {
 Expect(err).ToNot(HaveOccurred())

 if len(updatedConfigMaps.Items) != 1 {
-klog.Warningf("expected exactly 1 RTE configmap, got: %d", len(updatedConfigMaps.Items))
+klog.InfoS("expected exactly configmap", "current", len(updatedConfigMaps.Items), "desired", 1)
 return false
 }
 rteConfigMap = &updatedConfigMaps.Items[0]
 return true
 }).WithTimeout(5 * time.Minute).WithPolling(10 * time.Second).Should(BeTrue())
+// TODO: multi-line value in structured log
 klog.InfoS("found RTE configmap", "rteConfigMap", rteConfigMap)

 cfg, err := configuration.ValidateAndExtractRTEConfigData(rteConfigMap)
@@ -392,7 +389,7 @@ var _ = Describe("[Install] durability", Serial, func() {
 // the same configuration should apply to all NRT objects
 matchingErr := configuration.CheckTopologyManagerConfigMatching(nrt, &cfg)
 if matchingErr != "" {
-klog.Warningf("NRT %q doesn't match topologyManager configuration: %s", nrt.Name, matchingErr)
+klog.InfoS("NRT doesn't match topologyManager configuration", "name", nrt.Name, "problem", matchingErr)
 return false
 }
 }
@@ -451,18 +448,18 @@ func getDaemonSetByOwnerReference(uid types.UID) (*appsv1.DaemonSet, error) {
 func logRTEPodsLogs(cli client.Client, k8sCli *kubernetes.Clientset, ctx context.Context, nroObj *nropv1.NUMAResourcesOperator, reason string) {
 dss, err := objects.GetDaemonSetsOwnedBy(cli, nroObj.ObjectMeta)
 if err != nil {
-klog.Warningf("no DaemonSets for %s (%s)", nroObj.Name, nroObj.GetUID())
+klog.InfoS("no DaemonSets", "nroName", nroObj.Name, "nroUID", nroObj.GetUID())
 return
 }

-klog.Infof("%s (%d DaemonSet)", reason, len(dss))
+klog.InfoS("logging RTE pods", "reason", reason, "daemonsetCount", len(dss))

 for _, ds := range dss {
-klog.Infof("daemonset %s/%s desired %d scheduled %d ready %d", ds.Namespace, ds.Name, ds.Status.DesiredNumberScheduled, ds.Status.CurrentNumberScheduled, ds.Status.NumberReady)
+klog.InfoS("daemonset status", "namespace", ds.Namespace, "name", ds.Name, "desired", ds.Status.DesiredNumberScheduled, "scheduled", ds.Status.CurrentNumberScheduled, "ready", ds.Status.NumberReady)

 labSel, err := metav1.LabelSelectorAsSelector(ds.Spec.Selector)
 if err != nil {
-klog.Warningf("cannot use DaemonSet label selector as selector: %v", err)
+klog.ErrorS(err, "cannot use DaemonSet label selector as selector")
 continue
 }

@@ -472,17 +469,18 @@ func logRTEPodsLogs(cli client.Client, k8sCli *kubernetes.Clientset, ctx context
 LabelSelector: labSel,
 })
 if err != nil {
-klog.Warningf("cannot get Pods by DaemonSet %s/%s: %v", ds.Namespace, ds.Name, err)
+klog.ErrorS(err, "cannot get Pods by DaemonSet", "namespace", ds.Namespace, "name", ds.Name)
 continue
 }

 for _, pod := range podList.Items {
 logs, err := objects.GetLogsForPod(k8sCli, pod.Namespace, pod.Name, containerNameRTE)
 if err != nil {
-klog.Warningf("DaemonSet %s/%s -> Pod %s/%s -> error getting logs: %v", ds.Namespace, ds.Name, pod.Namespace, pod.Name, err)
+klog.ErrorS(err, "cannot fetch logs", "dsNamespace", ds.Namespace, "dsName", ds.Name, "podNamespace", pod.Namespace, "podName", pod.Name)
 continue
 }
-klog.Infof("DaemonSet %s/%s -> Pod %s/%s -> logs:\n%s\n-----\n", ds.Namespace, ds.Name, pod.Namespace, pod.Name, logs)
+// TODO: multi-line value in structured log
+klog.InfoS("fetched logs", "dsNamespace", ds.Namespace, "dsName", ds.Name, "podNamespace", pod.Namespace, "podName", pod.Name, "logs", logs)
 }
 }
 }

test/e2e/must-gather/must_gather_test.go

Lines changed: 1 addition & 1 deletion
@@ -91,7 +91,7 @@ var _ = Describe("[must-gather] NRO data collected", func() {
 }
 err := os.RemoveAll(destDir)
 if err != nil {
-klog.Warningf("unable to remove temporary directory %q: %v", destDir, err)
+klog.ErrorS(err, "unable to remove temporary directory", "path", destDir)
 }
 })

test/e2e/rte/rte_test.go

Lines changed: 19 additions & 19 deletions
@@ -71,11 +71,11 @@ var _ = ginkgo.Describe("with a running cluster with all the components", func()
 gomega.Eventually(func() bool {
 rteDss, err := getOwnedDss(clients.K8sClient, nropObj.ObjectMeta)
 if err != nil {
-klog.Warningf("failed to get the owned DaemonSets: %v", err)
+klog.ErrorS(err, "failed to get the owned DaemonSets")
 return false
 }
 if len(rteDss) == 0 {
-klog.Warningf("expect the numaresourcesoperator to own at least one DaemonSet")
+klog.InfoS("expect the numaresourcesoperator to own at least one DaemonSet")
 return false
 }

@@ -85,11 +85,11 @@ var _ = ginkgo.Describe("with a running cluster with all the components", func()

 found, match := matchLogLevelToKlog(rteCnt, nropObj.Spec.LogLevel)
 if !found {
-klog.Warningf("-v flag doesn't exist in container %q args managed by DaemonSet: %q", rteCnt.Name, ds.Name)
+klog.InfoS("-v flag doesn't exist in container args managed by DaemonSet", "containerName", rteCnt.Name, "daemonsetName", ds.Name)
 return false
 }
 if !match {
-klog.Warningf("LogLevel %s doesn't match the existing -v flag in container: %q under DaemonSet: %q", nropObj.Spec.LogLevel, rteCnt.Name, ds.Name)
+klog.InfoS("LogLevel doesn't match the existing -v flag in container under DaemonSet", "logLevel", nropObj.Spec.LogLevel, "containerName", rteCnt.Name, "daemonsetName", ds.Name)
 return false
 }
 }
@@ -110,11 +110,11 @@ var _ = ginkgo.Describe("with a running cluster with all the components", func()
 gomega.Eventually(func() bool {
 rteDss, err := getOwnedDss(clients.K8sClient, nropObj.ObjectMeta)
 if err != nil {
-klog.Warningf("failed to get the owned DaemonSets: %v", err)
+klog.ErrorS(err, "failed to get the owned DaemonSets")
 return false
 }
 if len(rteDss) == 0 {
-klog.Warningf("expect the numaresourcesoperator to own at least one DaemonSet")
+klog.InfoS("expect the numaresourcesoperator to own at least one DaemonSet")
 return false
 }

@@ -124,12 +124,12 @@ var _ = ginkgo.Describe("with a running cluster with all the components", func()

 found, match := matchLogLevelToKlog(rteCnt, nropObj.Spec.LogLevel)
 if !found {
-klog.Warningf("-v flag doesn't exist in container %q args under DaemonSet: %q", rteCnt.Name, ds.Name)
+klog.InfoS("-v flag doesn't exist in container args under DaemonSet", "containerName", rteCnt.Name, "daemonsetName", ds.Name)
 return false
 }

 if !match {
-klog.Warningf("LogLevel %s doesn't match the existing -v flag in container: %q managed by DaemonSet: %q", nropObj.Spec.LogLevel, rteCnt.Name, ds.Name)
+klog.InfoS("LogLevel doesn't match the existing -v flag in container managed by DaemonSet", "logLevel", nropObj.Spec.LogLevel, "containerName", rteCnt.Name, "daemonsetName", ds.Name)
 return false
 }
 }
@@ -147,17 +147,17 @@ var _ = ginkgo.Describe("with a running cluster with all the components", func()
 gomega.Expect(nropObj.Status.DaemonSets).ToNot(gomega.BeEmpty())
 dssFromNodeGroupStatus := testobjs.GetDaemonSetListFromNodeGroupStatuses(nropObj.Status.NodeGroups)
 gomega.Expect(reflect.DeepEqual(nropObj.Status.DaemonSets, dssFromNodeGroupStatus)).To(gomega.BeTrue())
-klog.Infof("NRO %q", nropObj.Name)
+klog.InfoS("using NRO instance", "name", nropObj.Name)

 // NROP guarantees all the daemonsets are in the same namespace,
 // so we pick the first for the sake of brevity
 namespace := nropObj.Status.DaemonSets[0].Namespace
-klog.Infof("namespace %q", namespace)
+klog.InfoS("Using NRO namespace", "namespace", namespace)

 mcpList := &mcov1.MachineConfigPoolList{}
 err = clients.Client.List(context.TODO(), mcpList)
 gomega.Expect(err).ToNot(gomega.HaveOccurred())
-klog.Infof("MCPs count: %d", len(mcpList.Items))
+klog.InfoS("detected MCPs", "count", len(mcpList.Items))

 mcoKcList := &mcov1.KubeletConfigList{}
 err = clients.Client.List(context.TODO(), mcoKcList)
@@ -176,12 +176,12 @@ var _ = ginkgo.Describe("with a running cluster with all the components", func()
 gomega.Expect(err).ToNot(gomega.HaveOccurred())

 generatedName := objectnames.GetComponentName(nropObj.Name, mcp.Name)
-klog.Infof("generated config map name: %q", generatedName)
+klog.InfoS("generated config map", "name", generatedName)
 cm, err := clients.K8sClient.CoreV1().ConfigMaps(namespace).Get(context.TODO(), generatedName, metav1.GetOptions{})
 gomega.Expect(err).ToNot(gomega.HaveOccurred())

 rc, err := rteConfigMapToRTEConfig(cm)
-klog.Infof("RTE config: %#v", rc)
+klog.InfoS("Using RTE", "config", rc)
 gomega.Expect(err).ToNot(gomega.HaveOccurred())

 gomega.Expect(rc.Kubelet.TopologyManagerPolicy).To(gomega.Equal(kc.TopologyManagerPolicy), "TopologyManager Policy mismatch")
@@ -196,17 +196,17 @@ var _ = ginkgo.Describe("with a running cluster with all the components", func()
 gomega.Expect(nropObj.Status.DaemonSets).ToNot(gomega.BeEmpty())
 dssFromNodeGroupStatus := testobjs.GetDaemonSetListFromNodeGroupStatuses(nropObj.Status.NodeGroups)
 gomega.Expect(reflect.DeepEqual(nropObj.Status.DaemonSets, dssFromNodeGroupStatus)).To(gomega.BeTrue())
-klog.Infof("NRO %q", nropObj.Name)
+klog.InfoS("Using NRO instance", "name", nropObj.Name)

 // NROP guarantees all the daemonsets are in the same namespace,
 // so we pick the first for the sake of brevity
 namespace := nropObj.Status.DaemonSets[0].Namespace
-klog.Infof("namespace %q", namespace)
+klog.InfoS("Using NRO namespace", "namespace", namespace)

 mcpList := &mcov1.MachineConfigPoolList{}
 err = clients.Client.List(context.TODO(), mcpList)
 gomega.Expect(err).ToNot(gomega.HaveOccurred())
-klog.Infof("MCPs count: %d", len(mcpList.Items))
+klog.InfoS("detected MCPs", "count", len(mcpList.Items))

 mcoKcList := &mcov1.KubeletConfigList{}
 err = clients.Client.List(context.TODO(), mcoKcList)
@@ -224,7 +224,7 @@ var _ = ginkgo.Describe("with a running cluster with all the components", func()
 gomega.Expect(err).ToNot(gomega.HaveOccurred())

 generatedName := objectnames.GetComponentName(nropObj.Name, mcp.Name)
-klog.Infof("generated config map name: %q", generatedName)
+klog.InfoS("generated config map", "name", generatedName)
 cm, err := clients.K8sClient.CoreV1().ConfigMaps(namespace).Get(context.TODO(), generatedName, metav1.GetOptions{})
 gomega.Expect(err).ToNot(gomega.HaveOccurred())

@@ -242,7 +242,7 @@ var _ = ginkgo.Describe("with a running cluster with all the components", func()
 gomega.Expect(err).ToNot(gomega.HaveOccurred())

 if !reflect.DeepEqual(cm.Data, desiredMapState) {
-klog.Warningf("ConfigMap %q data is not in it's desired state, waiting for controller to update it", cm.Name)
+klog.InfoS("ConfigMap data is not in it's desired state, waiting for controller to update it", "configMapName", cm.Name)
 return false
 }
 return true
@@ -298,7 +298,7 @@ var _ = ginkgo.Describe("with a running cluster with all the components", func()
 err = json.Unmarshal(stdout, &st)
 gomega.Expect(err).ToNot(gomega.HaveOccurred())

-klog.Infof("got status from %s/%s/%s -> %q (%d pods)", rtePod.Namespace, rtePod.Name, rteCnt.Name, st.FingerprintComputed, len(st.Pods))
+klog.InfoS("got status", "podNamespace", rtePod.Namespace, "podName", rtePod.Name, "containerName", rteCnt.Name, "fingerprintComputed", st.FingerprintComputed, "podCount", len(st.Pods))

 gomega.Expect(st.FingerprintComputed).ToNot(gomega.BeEmpty(), "missing fingerprint - should always be reported")
 gomega.Expect(st.Pods).ToNot(gomega.BeEmpty(), "missing pods - at least RTE itself should be there")

test/e2e/sched/install/install_test.go

Lines changed: 5 additions & 6 deletions
@@ -61,18 +61,17 @@ var _ = Describe("[Scheduler] install", func() {
 updatedNROObj := &nropv1.NUMAResourcesScheduler{}
 err := e2eclient.Client.Get(context.TODO(), client.ObjectKeyFromObject(nroSchedObj), updatedNROObj)
 if err != nil {
-klog.Warningf("failed to get the NRO Scheduler resource: %v", err)
+klog.ErrorS(err, "failed to get the NRO Scheduler resource")
 return false
 }

 cond := status.FindCondition(updatedNROObj.Status.Conditions, status.ConditionAvailable)
 if cond == nil {
-klog.Warningf("missing conditions in %v", updatedNROObj)
+klog.InfoS("missing conditions", "nroObj", updatedNROObj)
 return false
 }

-klog.Infof("condition: %v", cond)
-klog.Infof("conditions: %v", updatedNROObj.Status.Conditions)
+klog.InfoS("scheduler status", "availableCondition", cond, "conditions", updatedNROObj.Status.Conditions)

 return cond.Status == metav1.ConditionTrue
 }).WithTimeout(5*time.Minute).WithPolling(10*time.Second).Should(BeTrue(), "NRO Scheduler condition did not become available")
@@ -85,12 +84,12 @@ var _ = Describe("[Scheduler] install", func() {
 Eventually(func() bool {
 deployment, err = podlist.With(e2eclient.Client).DeploymentByOwnerReference(context.TODO(), nroSchedObj.UID)
 if err != nil {
-klog.Warningf("unable to get deployment by owner reference: %v", err)
+klog.ErrorS(err, "unable to get deployment by owner reference")
 return false
 }

 if deployment.Status.ReadyReplicas != *deployment.Spec.Replicas {
-klog.Warningf("Invalid number of ready replicas: desired: %d, actual: %d", *deployment.Spec.Replicas, deployment.Status.ReadyReplicas)
+klog.InfoS("Invalid number of ready replicas", "current", deployment.Status.ReadyReplicas, "desired", *deployment.Spec.Replicas)
 return false
 }
 return true
