Commit 8851936

[AA][HI] e2e: klog: Infof -> InfoS
These changes were AI-Assisted (hence the AA tag), then largely amended by a human (hence the HI tag - Human Intervention).

All the instances were found manually with:

```
git grep Infof test/e2e | cut -d: -f1 | uniq -c
```

Initial prompt:

```
Translate the calls to `klog.Infof` to `klog.InfoS`. Since the function signatures are different, rearrange the parameters to match the signature of the `InfoS` function. Modify only the file `test/e2e/tools/mkginkgolabelfilter_test.go`
```

Subsequent N prompts (roughly 15-25 calls to fix per prompt):

```
do the same for the files ...
```

The files were picked randomly by the user.

Assisted-by: Cursor AI
Assisted-by-model: claude-4-sonnet
Signed-off-by: Francesco Romani <[email protected]>
1 parent: 02af389

18 files changed: +231 additions, -182 deletions
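
The conversion pattern is mechanical throughout: a printf-style format string becomes a constant message plus alternating key/value pairs. A minimal sketch of the before/after shape (the variable names here are illustrative, not taken from the repository):

```
package main

import "k8s.io/klog/v2"

func main() {
	name, desired, ready := "rte-worker", 3, 2

	// Before: printf-style, data interpolated into the message.
	klog.Infof("daemonset %s desired %d ready %d", name, desired, ready)

	// After: constant message plus structured key/value pairs.
	klog.InfoS("daemonset status", "name", name, "desired", desired, "ready", ready)
}
```

Since `InfoS` takes `(msg string, keysAndValues ...interface{})` rather than a format string, each call site needs its parameters rearranged, not just the function renamed - which is what the prompts above asked the model to do.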

test/e2e/install/install_test.go

Lines changed: 20 additions & 22 deletions

@@ -93,11 +93,11 @@ var _ = Describe("[Install] continuousIntegration", Serial, func() {
 
 	cond := status.FindCondition(updatedNROObj.Status.Conditions, status.ConditionAvailable)
 	if cond == nil {
-		klog.Infof("missing conditions in %v", updatedNROObj)
+		klog.InfoS("missing conditions", "nroObj", updatedNROObj)
 		return false, err
 	}
 
-	klog.Infof("condition for %s: %v", nname.Name, cond)
+	klog.InfoS("condition", "name", nname.Name, "condition", cond)
 	return cond.Status == metav1.ConditionTrue, nil
 })
 if err != nil {
@@ -122,22 +122,22 @@ var _ = Describe("[Install] continuousIntegration", Serial, func() {
 }
 
 if ds.Status.NumberMisscheduled != 0 {
-	klog.Infof(" Misscheduled: There are %d nodes that should not be running Daemon pod but are", ds.Status.NumberMisscheduled)
+	klog.InfoS("Misscheduled: There are nodes that should not be running Daemon pod, but they are", "count", ds.Status.NumberMisscheduled)
 	return false
 }
 
 if ds.Status.NumberUnavailable != 0 {
-	klog.Infof(" NumberUnavailable %d (should be 0)", ds.Status.NumberUnavailable)
+	klog.InfoS("NumberUnavailable mismatch", "current", ds.Status.NumberUnavailable, "desired", 0)
 	return false
 }
 
 if ds.Status.CurrentNumberScheduled != ds.Status.DesiredNumberScheduled {
-	klog.Infof(" CurrentNumberScheduled %d (should be %d)", ds.Status.CurrentNumberScheduled, ds.Status.DesiredNumberScheduled)
+	klog.InfoS("CurrentNumberScheduled mismatch", "current", ds.Status.CurrentNumberScheduled, "desired", ds.Status.DesiredNumberScheduled)
 	return false
 }
 
 if ds.Status.NumberReady != ds.Status.DesiredNumberScheduled {
-	klog.Infof(" NumberReady %d (should be %d)", ds.Status.CurrentNumberScheduled, ds.Status.DesiredNumberScheduled)
+	klog.InfoS("NumberReady mismatch", "current", ds.Status.NumberReady, "desired", ds.Status.DesiredNumberScheduled)
 	return false
 }
 return true
@@ -182,11 +182,11 @@ var _ = Describe("[Install] durability", Serial, func() {
 
 cond := status.FindCondition(updatedNROObj.Status.Conditions, status.ConditionDegraded)
 if cond == nil {
-	klog.Infof("missing conditions in %v", updatedNROObj)
+	klog.InfoS("missing conditions", "nroObj", updatedNROObj)
 	return false
 }
 
-klog.Infof("condition: %v", cond)
+klog.InfoS("condition", "condition", cond)
 
 return cond.Status == metav1.ConditionTrue
 }).WithTimeout(5*time.Minute).WithPolling(10*time.Second).Should(BeTrue(), "NUMAResourcesOperator condition did not become degraded")
@@ -219,7 +219,7 @@ var _ = Describe("[Install] durability", Serial, func() {
 return false, err
 }
 if len(nroObj.Status.DaemonSets) != 1 {
-	klog.Infof("unsupported daemonsets (/MCP) count: %d", len(nroObj.Status.DaemonSets))
+	klog.InfoS("unsupported daemonsets (/MCP)", "count", len(nroObj.Status.DaemonSets))
 	return false, nil
 }
 return true, nil
@@ -258,17 +258,13 @@ var _ = Describe("[Install] durability", Serial, func() {
 }
 
 if !nrowait.AreDaemonSetPodsReady(&updatedDs.Status) {
-	klog.Infof("daemonset %s desired %d scheduled %d ready %d",
-		dsKey.String(),
-		updatedDs.Status.DesiredNumberScheduled,
-		updatedDs.Status.CurrentNumberScheduled,
-		updatedDs.Status.NumberReady)
+	klog.InfoS("daemonset not ready", "key", dsKey.String(), "desired", updatedDs.Status.DesiredNumberScheduled, "scheduled", updatedDs.Status.CurrentNumberScheduled, "ready", updatedDs.Status.NumberReady)
 	return false
 }
 
-klog.Infof("daemonset %s ready", dsKey.String())
+klog.InfoS("daemonset ready", "key", dsKey.String())
 
-klog.Infof("daemonset Generation observed %v current %v", updatedDs.Status.ObservedGeneration, ds.Generation)
+klog.InfoS("daemonset Generation", "observedGeneration", updatedDs.Status.ObservedGeneration, "currentGeneration", ds.Generation)
 isUpdated := updatedDs.Status.ObservedGeneration > ds.Generation
 if !isUpdated {
 	return false
@@ -315,7 +311,7 @@ var _ = Describe("[Install] durability", Serial, func() {
 key := client.ObjectKeyFromObject(obj)
 if err := e2eclient.Client.Get(context.TODO(), key, obj); !errors.IsNotFound(err) {
 	if err == nil {
-		klog.Infof("obj %s still exists", key.String())
+		klog.InfoS("obj still exists", "key", key.String())
 	} else {
 		klog.ErrorS(err, "obj return with error", "key", key.String())
 	}
@@ -369,12 +365,13 @@ var _ = Describe("[Install] durability", Serial, func() {
 Expect(err).ToNot(HaveOccurred())
 
 if len(updatedConfigMaps.Items) != 1 {
-	klog.Infof("expected exactly 1 RTE configmap, got: %d", len(updatedConfigMaps.Items))
+	klog.InfoS("expected exactly configmap", "current", len(updatedConfigMaps.Items), "desired", 1)
 	return false
 }
 rteConfigMap = &updatedConfigMaps.Items[0]
 return true
 }).WithTimeout(5 * time.Minute).WithPolling(10 * time.Second).Should(BeTrue())
+// TODO: multi-line value in structured log
 klog.InfoS("found RTE configmap", "rteConfigMap", rteConfigMap)
 
 cfg, err := configuration.ValidateAndExtractRTEConfigData(rteConfigMap)
@@ -451,14 +448,14 @@ func getDaemonSetByOwnerReference(uid types.UID) (*appsv1.DaemonSet, error) {
 func logRTEPodsLogs(cli client.Client, k8sCli *kubernetes.Clientset, ctx context.Context, nroObj *nropv1.NUMAResourcesOperator, reason string) {
 	dss, err := objects.GetDaemonSetsOwnedBy(cli, nroObj.ObjectMeta)
 	if err != nil {
-		klog.Infof("no DaemonSets for %s (%s)", nroObj.Name, nroObj.GetUID())
+		klog.InfoS("no DaemonSets", "nroName", nroObj.Name, "nroUID", nroObj.GetUID())
 		return
 	}
 
-	klog.Infof("%s (%d DaemonSet)", reason, len(dss))
+	klog.InfoS("logging RTE pods", "reason", reason, "daemonsetCount", len(dss))
 
 	for _, ds := range dss {
-		klog.Infof("daemonset %s/%s desired %d scheduled %d ready %d", ds.Namespace, ds.Name, ds.Status.DesiredNumberScheduled, ds.Status.CurrentNumberScheduled, ds.Status.NumberReady)
+		klog.InfoS("daemonset status", "namespace", ds.Namespace, "name", ds.Name, "desired", ds.Status.DesiredNumberScheduled, "scheduled", ds.Status.CurrentNumberScheduled, "ready", ds.Status.NumberReady)
 
 		labSel, err := metav1.LabelSelectorAsSelector(ds.Spec.Selector)
 		if err != nil {
@@ -482,7 +479,8 @@ func logRTEPodsLogs(cli client.Client, k8sCli *kubernetes.Clientset, ctx context
 klog.ErrorS(err, "cannot fetch logs", "dsNamespace", ds.Namespace, "dsName", ds.Name, "podNamespace", pod.Namespace, "podName", pod.Name)
 continue
 }
-klog.Infof("DaemonSet %s/%s -> Pod %s/%s -> logs:\n%s\n-----\n", ds.Namespace, ds.Name, pod.Namespace, pod.Name, logs)
+// TODO: multi-line value in structured log
+klog.InfoS("fetched logs", "dsNamespace", ds.Namespace, "dsName", ds.Name, "podNamespace", pod.Namespace, "podName", pod.Name, "logs", logs)
 }
 }
 }
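
Several converted call sites above carry a `// TODO: multi-line value in structured log` note: `InfoS` serializes a multi-line value (pod logs, diffs, NRT dumps) into a single escaped string, which is hard to read. One possible follow-up, sketched here under the assumption that per-line records are acceptable (`logMultiline` is a hypothetical helper, not part of the repository):

```
package main

import (
	"strings"

	"k8s.io/klog/v2"
)

// logMultiline is a hypothetical helper (not part of this repository):
// it emits one InfoS record per line of the value, so every log line
// stays single-line and grep-friendly.
func logMultiline(msg, key, value string) {
	for i, line := range strings.Split(value, "\n") {
		klog.InfoS(msg, "lineIdx", i, key, line)
	}
}

func main() {
	logMultiline("fetched logs", "logLine", "line one\nline two")
}
```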

test/e2e/rte/rte_test.go

Lines changed: 17 additions & 17 deletions

@@ -75,7 +75,7 @@ var _ = ginkgo.Describe("with a running cluster with all the components", func()
 return false
 }
 if len(rteDss) == 0 {
-	klog.Infof("expect the numaresourcesoperator to own at least one DaemonSet")
+	klog.InfoS("expect the numaresourcesoperator to own at least one DaemonSet")
 	return false
 }
 
@@ -85,11 +85,11 @@ var _ = ginkgo.Describe("with a running cluster with all the components", func()
 
 found, match := matchLogLevelToKlog(rteCnt, nropObj.Spec.LogLevel)
 if !found {
-	klog.Infof("-v flag doesn't exist in container %q args managed by DaemonSet: %q", rteCnt.Name, ds.Name)
+	klog.InfoS("-v flag doesn't exist in container args managed by DaemonSet", "containerName", rteCnt.Name, "daemonsetName", ds.Name)
 	return false
 }
 if !match {
-	klog.Infof("LogLevel %s doesn't match the existing -v flag in container: %q under DaemonSet: %q", nropObj.Spec.LogLevel, rteCnt.Name, ds.Name)
+	klog.InfoS("LogLevel doesn't match the existing -v flag in container under DaemonSet", "logLevel", nropObj.Spec.LogLevel, "containerName", rteCnt.Name, "daemonsetName", ds.Name)
 	return false
 }
 }
@@ -114,7 +114,7 @@ var _ = ginkgo.Describe("with a running cluster with all the components", func()
 return false
 }
 if len(rteDss) == 0 {
-	klog.Infof("expect the numaresourcesoperator to own at least one DaemonSet")
+	klog.InfoS("expect the numaresourcesoperator to own at least one DaemonSet")
 	return false
 }
 
@@ -124,12 +124,12 @@ var _ = ginkgo.Describe("with a running cluster with all the components", func()
 
 found, match := matchLogLevelToKlog(rteCnt, nropObj.Spec.LogLevel)
 if !found {
-	klog.Infof("-v flag doesn't exist in container %q args under DaemonSet: %q", rteCnt.Name, ds.Name)
+	klog.InfoS("-v flag doesn't exist in container args under DaemonSet", "containerName", rteCnt.Name, "daemonsetName", ds.Name)
 	return false
 }
 
 if !match {
-	klog.Infof("LogLevel %s doesn't match the existing -v flag in container: %q managed by DaemonSet: %q", nropObj.Spec.LogLevel, rteCnt.Name, ds.Name)
+	klog.InfoS("LogLevel doesn't match the existing -v flag in container managed by DaemonSet", "logLevel", nropObj.Spec.LogLevel, "containerName", rteCnt.Name, "daemonsetName", ds.Name)
 	return false
 }
 }
@@ -147,17 +147,17 @@ var _ = ginkgo.Describe("with a running cluster with all the components", func()
 gomega.Expect(nropObj.Status.DaemonSets).ToNot(gomega.BeEmpty())
 dssFromNodeGroupStatus := testobjs.GetDaemonSetListFromNodeGroupStatuses(nropObj.Status.NodeGroups)
 gomega.Expect(reflect.DeepEqual(nropObj.Status.DaemonSets, dssFromNodeGroupStatus)).To(gomega.BeTrue())
-klog.Infof("NRO %q", nropObj.Name)
+klog.InfoS("using NRO instance", "name", nropObj.Name)
 
 // NROP guarantees all the daemonsets are in the same namespace,
 // so we pick the first for the sake of brevity
 namespace := nropObj.Status.DaemonSets[0].Namespace
-klog.Infof("namespace %q", namespace)
+klog.InfoS("Using NRO namespace", "namespace", namespace)
 
 mcpList := &mcov1.MachineConfigPoolList{}
 err = clients.Client.List(context.TODO(), mcpList)
 gomega.Expect(err).ToNot(gomega.HaveOccurred())
-klog.Infof("MCPs count: %d", len(mcpList.Items))
+klog.InfoS("detected MCPs", "count", len(mcpList.Items))
 
 mcoKcList := &mcov1.KubeletConfigList{}
 err = clients.Client.List(context.TODO(), mcoKcList)
@@ -176,12 +176,12 @@ var _ = ginkgo.Describe("with a running cluster with all the components", func()
 gomega.Expect(err).ToNot(gomega.HaveOccurred())
 
 generatedName := objectnames.GetComponentName(nropObj.Name, mcp.Name)
-klog.Infof("generated config map name: %q", generatedName)
+klog.InfoS("generated config map", "name", generatedName)
 cm, err := clients.K8sClient.CoreV1().ConfigMaps(namespace).Get(context.TODO(), generatedName, metav1.GetOptions{})
 gomega.Expect(err).ToNot(gomega.HaveOccurred())
 
 rc, err := rteConfigMapToRTEConfig(cm)
-klog.Infof("RTE config: %#v", rc)
+klog.InfoS("Using RTE", "config", rc)
 gomega.Expect(err).ToNot(gomega.HaveOccurred())
 
 gomega.Expect(rc.Kubelet.TopologyManagerPolicy).To(gomega.Equal(kc.TopologyManagerPolicy), "TopologyManager Policy mismatch")
@@ -196,17 +196,17 @@ var _ = ginkgo.Describe("with a running cluster with all the components", func()
 gomega.Expect(nropObj.Status.DaemonSets).ToNot(gomega.BeEmpty())
 dssFromNodeGroupStatus := testobjs.GetDaemonSetListFromNodeGroupStatuses(nropObj.Status.NodeGroups)
 gomega.Expect(reflect.DeepEqual(nropObj.Status.DaemonSets, dssFromNodeGroupStatus)).To(gomega.BeTrue())
-klog.Infof("NRO %q", nropObj.Name)
+klog.InfoS("Using NRO instance", "name", nropObj.Name)
 
 // NROP guarantees all the daemonsets are in the same namespace,
 // so we pick the first for the sake of brevity
 namespace := nropObj.Status.DaemonSets[0].Namespace
-klog.Infof("namespace %q", namespace)
+klog.InfoS("Using NRO namespace", "namespace", namespace)
 
 mcpList := &mcov1.MachineConfigPoolList{}
 err = clients.Client.List(context.TODO(), mcpList)
 gomega.Expect(err).ToNot(gomega.HaveOccurred())
-klog.Infof("MCPs count: %d", len(mcpList.Items))
+klog.InfoS("detected MCPs", "count", len(mcpList.Items))
 
 mcoKcList := &mcov1.KubeletConfigList{}
 err = clients.Client.List(context.TODO(), mcoKcList)
@@ -224,7 +224,7 @@ var _ = ginkgo.Describe("with a running cluster with all the components", func()
 gomega.Expect(err).ToNot(gomega.HaveOccurred())
 
 generatedName := objectnames.GetComponentName(nropObj.Name, mcp.Name)
-klog.Infof("generated config map name: %q", generatedName)
+klog.InfoS("generated config map", "name", generatedName)
 cm, err := clients.K8sClient.CoreV1().ConfigMaps(namespace).Get(context.TODO(), generatedName, metav1.GetOptions{})
 gomega.Expect(err).ToNot(gomega.HaveOccurred())
 
@@ -242,7 +242,7 @@ var _ = ginkgo.Describe("with a running cluster with all the components", func()
 gomega.Expect(err).ToNot(gomega.HaveOccurred())
 
 if !reflect.DeepEqual(cm.Data, desiredMapState) {
-	klog.Infof("ConfigMap %q data is not in it's desired state, waiting for controller to update it", cm.Name)
+	klog.InfoS("ConfigMap data is not in it's desired state, waiting for controller to update it", "configMapName", cm.Name)
 	return false
 }
 return true
@@ -298,7 +298,7 @@ var _ = ginkgo.Describe("with a running cluster with all the components", func()
 err = json.Unmarshal(stdout, &st)
 gomega.Expect(err).ToNot(gomega.HaveOccurred())
 
-klog.Infof("got status from %s/%s/%s -> %q (%d pods)", rtePod.Namespace, rtePod.Name, rteCnt.Name, st.FingerprintComputed, len(st.Pods))
+klog.InfoS("got status", "podNamespace", rtePod.Namespace, "podName", rtePod.Name, "containerName", rteCnt.Name, "fingerprintComputed", st.FingerprintComputed, "podCount", len(st.Pods))
 
 gomega.Expect(st.FingerprintComputed).ToNot(gomega.BeEmpty(), "missing fingerprint - should always be reported")
 gomega.Expect(st.Pods).ToNot(gomega.BeEmpty(), "missing pods - at least RTE itself should be there")

test/e2e/sched/install/install_test.go

Lines changed: 3 additions & 4 deletions

@@ -67,12 +67,11 @@ var _ = Describe("[Scheduler] install", func() {
 
 cond := status.FindCondition(updatedNROObj.Status.Conditions, status.ConditionAvailable)
 if cond == nil {
-	klog.Infof("missing conditions in %v", updatedNROObj)
+	klog.InfoS("missing conditions", "nroObj", updatedNROObj)
 	return false
 }
 
-klog.Infof("condition: %v", cond)
-klog.Infof("conditions: %v", updatedNROObj.Status.Conditions)
+klog.InfoS("scheduler status", "availableCondition", cond, "conditions", updatedNROObj.Status.Conditions)
 
 return cond.Status == metav1.ConditionTrue
 }).WithTimeout(5*time.Minute).WithPolling(10*time.Second).Should(BeTrue(), "NRO Scheduler condition did not become available")
@@ -90,7 +89,7 @@ var _ = Describe("[Scheduler] install", func() {
 }
 
 if deployment.Status.ReadyReplicas != *deployment.Spec.Replicas {
-	klog.Infof("Invalid number of ready replicas: desired: %d, actual: %d", *deployment.Spec.Replicas, deployment.Status.ReadyReplicas)
+	klog.InfoS("Invalid number of ready replicas", "current", deployment.Status.ReadyReplicas, "desired", *deployment.Spec.Replicas)
 	return false
 }
 return true

test/e2e/sched/sched_test.go

Lines changed: 5 additions & 3 deletions

@@ -159,7 +159,7 @@ var _ = Describe("[Scheduler] imageReplacement", func() {
 }
 }
 if nroCM == nil {
-	klog.Infof("cannot match ConfigMap")
+	klog.InfoS("cannot match ConfigMap affecting scheduler", "schedulerName", nroSchedObj.Spec.SchedulerName, "schedulerImage", nroSchedObj.Spec.SchedulerImage)
 	return false
 }
 
@@ -183,7 +183,8 @@ var _ = Describe("[Scheduler] imageReplacement", func() {
 }
 
 if diff := cmp.Diff(nroCM.Data, initialCM.Data); diff != "" {
-	klog.Infof("updated ConfigMap data is not equal to the expected: %v", diff)
+	// TODO: multi-line value in structured log
+	klog.InfoS("updated ConfigMap data is not equal to the expected", "diff", diff)
 	return false
 }
 return true
@@ -216,7 +217,8 @@ var _ = Describe("[Scheduler] imageReplacement", func() {
 }
 
 if diff := cmp.Diff(dp.Spec.Template.Spec, initialDP.Spec.Template.Spec); diff != "" {
-	klog.Infof("updated Deployment is not equal to the expected: %v", diff)
+	// TODO: multi-line value in structured log
+	klog.InfoS("updated Deployment is not equal to the expected", "diff", diff)
 	return false
 }
 return true

test/e2e/serial/config/fixture.go

Lines changed: 3 additions & 2 deletions

@@ -56,7 +56,8 @@ func (cfg *E2EConfig) RecordNRTReference() error {
 if err != nil {
 	return err
 }
-klog.Infof("recorded reference NRT data:\n%s", intnrt.ListToString(cfg.NRTList.Items, " reference"))
+// TODO: multi-line value in structured log
+klog.InfoS("recorded reference NRT data", "data", intnrt.ListToString(cfg.NRTList.Items, " reference"))
 return nil
 }
 
@@ -100,7 +101,7 @@ func NewFixtureWithOptions(nsName string, options e2efixture.Options) (*E2EConfi
 }
 
 cfg.SchedulerName = cfg.NROSchedObj.Status.SchedulerName
-klog.Infof("detected scheduler name: %q", cfg.SchedulerName)
+klog.InfoS("detected scheduler name", "schedulerName", cfg.SchedulerName)
 
 return &cfg, nil
 }
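
For reviewers unfamiliar with the payoff: with klog's default text format, the converted call above would render roughly as follows (header fields and values are illustrative, not captured from a real run):

```
I1025 10:00:00.000000   12345 fixture.go:104] detected scheduler name: "topo-aware-scheduler"                    <- old Infof
I1025 10:00:00.000000   12345 fixture.go:104] "detected scheduler name" schedulerName="topo-aware-scheduler"     <- new InfoS
```

The structured form keeps the message constant across occurrences, so log aggregators can group records by message and filter on the `schedulerName` key instead of parsing free-form text.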
