
Commit 02af389

[AA] e2e: klog: Infof -> InfoS
These changes were AI-assisted (hence the AA tag).

Finding all the instances manually:

```
git grep Infof test/e2e | cut -d: -f1 | uniq -c
```

Initial prompt:

```
Translate the calls to `klog.Infof` to `klog.InfoS`. Since the function
signatures are different, rearrange the parameters to match the signature
of the `InfoS` function. Modify only the file
`test/e2e/tools/mkginkgolabelfilter_test.go`
```

Subsequent N prompts (roughly 15-25 calls to fix per prompt):

```
do the same for the files ...
```

The files were picked randomly by the user.

Assisted-by: Cursor AI
Assisted-by-model: claude-4-sonnet
Signed-off-by: Francesco Romani <[email protected]>
1 parent b3acbc1 commit 02af389
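For reference, a minimal sketch of the conversion pattern this commit applies throughout the e2e suite, assuming the k8s.io/klog/v2 module: klog.Infof takes a printf-style format string, while klog.InfoS takes a constant message followed by alternating key/value pairs. The nodeName variable and the log message below are illustrative stand-ins, not lines taken from the diff.

```go
package main

import (
	"k8s.io/klog/v2"
)

func main() {
	defer klog.Flush()

	nodeName := "worker-0" // hypothetical value, for illustration only

	// Before: printf-style formatting interpolates the value into the
	// message, so every log line carries a slightly different string.
	klog.Infof("selected target node name: %q", nodeName)

	// After: the message stays constant and the value moves into a
	// key/value pair that log processors can parse and index reliably.
	klog.InfoS("selected target node name", "nodeName", nodeName)
}
```

Note that the `%q` quoting disappears in the InfoS form: structured logging quotes and escapes string values itself, which is why the translated calls in this diff drop the format verbs entirely.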

8 files changed: +37 −37 lines changed
test/e2e/sched/uninstall/uninstall_test.go

Lines changed: 1 addition & 1 deletion
@@ -66,7 +66,7 @@ var _ = Describe("[Scheduler] uninstall", func() {
			key := client.ObjectKeyFromObject(obj)
			if err := e2eclient.Client.Get(context.TODO(), key, obj); !errors.IsNotFound(err) {
				if err == nil {
-					klog.Infof("obj %s still exists", key.String())
+					klog.InfoS("obj still exists", "key", key.String())
				} else {
					klog.ErrorS(err, "obj return with error", "key", key.String())
				}

test/e2e/serial/config/config.go

Lines changed: 2 additions & 2 deletions
@@ -137,7 +137,7 @@ func validateTopologyManagerConfiguration(kconfigs map[string]*kubeletconfigv1be
	}
	kconfTMPolicy := kconfig.TopologyManagerPolicy
	if kconfTMPolicy == "" {
-		klog.Infof("Topology Manager Policy not set in kubeletconfig, fixing to %q", tmPolicyDefault)
+		klog.InfoS("Topology Manager Policy not set in kubeletconfig, fixing", "policy", tmPolicyDefault)
		kconfTMPolicy = tmPolicyDefault
	}
	if nrtTMPolicy.Value != kconfTMPolicy {
@@ -152,7 +152,7 @@ func validateTopologyManagerConfiguration(kconfigs map[string]*kubeletconfigv1be
	}
	kconfTMScope := kconfig.TopologyManagerScope
	if kconfTMScope == "" {
-		klog.Infof("Topology Manager Scope not set in kubeletconfig, fixing to %q", tmScopeDefault)
+		klog.InfoS("Topology Manager Scope not set in kubeletconfig, fixing", "scope", tmScopeDefault)
		kconfTMScope = tmScopeDefault
	}
	if nrtTMScope.Value != kconfTMScope {

test/e2e/serial/tests/non_regression_fundamentals.go

Lines changed: 6 additions & 6 deletions
@@ -85,7 +85,7 @@ var _ = Describe("numaresources fundamentals non-regression", Serial, Label("ser
			e2efixture.Skip(fxt, "Scheduler cache not enabled")
		}
		timeout := nroSchedObj.Status.CacheResyncPeriod.Round(time.Second) * 10
-		klog.Infof("pod running timeout: %v", timeout)
+		klog.InfoS("pod running timeout", "timeout", timeout)

		nrts := e2enrt.FilterZoneCountEqual(nrtList.Items, 2)
		if len(nrts) < 1 {
@@ -96,7 +96,7 @@ var _ = Describe("numaresources fundamentals non-regression", Serial, Label("ser
		targetNodeName, ok := e2efixture.PopNodeName(nodesNames)
		Expect(ok).To(BeTrue())

-		klog.Infof("selected target node name: %q", targetNodeName)
+		klog.InfoS("selected target node name", "nodeName", targetNodeName)

		nrtInfo, err := e2enrt.FindFromList(nrts, targetNodeName)
		Expect(err).ToNot(HaveOccurred())
@@ -116,7 +116,7 @@ var _ = Describe("numaresources fundamentals non-regression", Serial, Label("ser
		// CAUTION: still assuming all NUMA zones are equal across all nodes
		numPods := int(cpusVal / cpusPerPod) // unlikely we will need more than a billion pods (!!)

-		klog.Infof("creating %d pods consuming %d cpus each (found %d per NUMA zone)", numPods, cpusVal, maxAllocPerNUMAVal)
+		klog.InfoS("creating pods", "numPods", numPods, "cpusPerPod", cpusVal, "maxAllocPerNUMAZone", maxAllocPerNUMAVal)

		var testPods []*corev1.Pod
		for idx := 0; idx < numPods; idx++ {
@@ -180,7 +180,7 @@ var _ = Describe("numaresources fundamentals non-regression", Serial, Label("ser
			e2efixture.Skip(fxt, "Scheduler cache not enabled")
		}
		timeout := nroSchedObj.Status.CacheResyncPeriod.Round(time.Second) * 10
-		klog.Infof("pod running timeout: %v", timeout)
+		klog.InfoS("pod running timeout", "timeout", timeout)

		nrts := e2enrt.FilterZoneCountEqual(nrtList.Items, 2)
		if len(nrts) < 1 {
@@ -193,7 +193,7 @@ var _ = Describe("numaresources fundamentals non-regression", Serial, Label("ser
		referenceNodeName, ok := e2efixture.PopNodeName(nodesNames)
		Expect(ok).To(BeTrue())

-		klog.Infof("selected reference node name: %q", referenceNodeName)
+		klog.InfoS("selected reference node name", "nodeName", referenceNodeName)

		nrtInfo, err := e2enrt.FindFromList(nrts, referenceNodeName)
		Expect(err).ToNot(HaveOccurred())
@@ -211,7 +211,7 @@ var _ = Describe("numaresources fundamentals non-regression", Serial, Label("ser
		cpusVal := (10 * resVal) / 8
		numPods := int(int64(len(nrts)) * cpusVal / cpusPerPod) // unlikely we will need more than a billion pods (!!)

-		klog.Infof("creating %d pods consuming %d cpus each (found %d per NUMA zone)", numPods, cpusVal, resVal)
+		klog.InfoS("creating pods", "numPods", numPods, "cpusPerPod", cpusVal, "resPerNUMAZone", resVal)

		var testPods []*corev1.Pod
		for idx := 0; idx < numPods; idx++ {

test/e2e/serial/tests/tolerations.go

Lines changed: 14 additions & 14 deletions
@@ -283,7 +283,7 @@ var _ = Describe("[serial][disruptive][rtetols] numaresources RTE tolerations su
			extraTols = false
			By("waiting for DaemonSet to be ready")
			// note we still have the taint
-			klog.Infof("ensuring the RTE DS is running with less pods because taints (expected pods=%d)", len(workers)-1)
+			klog.InfoS("ensuring the RTE DS is running with less pods because taints", "expectedPods", len(workers)-1)
			_, err = wait.With(fxt.Client).Interval(10*time.Second).Timeout(1*time.Minute).ForDaemonsetPodsCreation(ctx, dsKey, len(workers)-1)
			Expect(err).ToNot(HaveOccurred(), "daemonset %s did not start updated: %v", dsKey.String(), err)
			_, err = wait.With(fxt.Client).Interval(10*time.Second).Timeout(3*time.Minute).ForDaemonSetReadyByKey(ctx, dsKey)
@@ -312,14 +312,14 @@ var _ = Describe("[serial][disruptive][rtetols] numaresources RTE tolerations su
			Expect(err).ToNot(HaveOccurred())
			tnt = &tnts[0]

-			klog.Infof("randomly picking the target node (among %d)", len(workers))
+			klog.InfoS("randomly picking the target node", "totalNodes", len(workers))
			targetIdx, ok := e2efixture.PickNodeIndex(workers)
			Expect(ok).To(BeTrue())
			taintedNode := &workers[targetIdx]

			applyTaintToNode(ctx, fxt.Client, taintedNode, tnt)
			targetNodeNames = append(targetNodeNames, taintedNode.Name)
-			klog.Infof("considering node: %q tainted with %q", taintedNode.Name, tnt.String())
+			klog.InfoS("considering node tainted", "node", taintedNode.Name, "taint", tnt.String())

			By(fmt.Sprintf("ensuring the RTE DS was created with expected pods count=%d", len(workers)))
			ds, err := wait.With(fxt.Client).Interval(time.Second).Timeout(3*time.Minute).ForDaemonsetPodsCreation(ctx, dsKey, len(workers))
@@ -340,7 +340,7 @@ var _ = Describe("[serial][disruptive][rtetols] numaresources RTE tolerations su
			_, err = wait.With(fxt.Client).Interval(10*time.Second).Timeout(3*time.Minute).ForDaemonSetReadyByKey(ctx, dsKey)
			Expect(err).ToNot(HaveOccurred(), "failed to get the daemonset %s: %v", dsKey.String(), err)

-			klog.Infof("verify the rte pod on node %q is evicted", taintedNode.Name)
+			klog.InfoS("verify the rte pod on node is evicted", "node", taintedNode.Name)
			err = wait.With(fxt.Client).Timeout(2*time.Minute).ForPodDeleted(ctx, podOnNode.Namespace, podOnNode.Name)
			Expect(err).ToNot(HaveOccurred(), "pod %s/%s still exists", podOnNode.Namespace, podOnNode.Name)
		})
@@ -356,14 +356,14 @@ var _ = Describe("[serial][disruptive][rtetols] numaresources RTE tolerations su
			Expect(err).ToNot(HaveOccurred())
			tnt = &tnts[0]

-			klog.Infof("randomly picking the target node (among %d)", len(workers))
+			klog.InfoS("randomly picking the target node", "totalNodes", len(workers))
			targetIdx, ok := e2efixture.PickNodeIndex(workers)
			Expect(ok).To(BeTrue())
			taintedNode := &workers[targetIdx]

			applyTaintToNode(ctx, fxt.Client, taintedNode, tnt)
			targetNodeNames = append(targetNodeNames, taintedNode.Name)
-			klog.Infof("considering node: %q tainted with %q", taintedNode.Name, tnt.String())
+			klog.InfoS("considering node tainted", "node", taintedNode.Name, "taint", tnt.String())

			By("add toleration to the NROP CR")
			tolerateVal1 := []corev1.Toleration{
@@ -393,15 +393,15 @@ var _ = Describe("[serial][disruptive][rtetols] numaresources RTE tolerations su
			tnt = &tnts[0]

			applyTaintToNode(ctx, fxt.Client, taintedNode, tnt)
-			klog.Infof("considering node: %q tainted with %q", taintedNode.Name, tnt.String())
+			klog.InfoS("considering node tainted", "node", taintedNode.Name, "taint", tnt.String())

			By(fmt.Sprintf("waiting for daemonset %v to report correct pods' number", dsKey.String()))
			updatedDs, err := wait.With(fxt.Client).Interval(time.Second).Timeout(time.Minute).ForDaemonsetPodsCreation(ctx, dsKey, len(workers)-1)
			Expect(err).NotTo(HaveOccurred(), "pods number is not as expected for RTE daemonset: expected %d found %d", len(workers)-1, updatedDs.Status.CurrentNumberScheduled)
			_, err = wait.With(e2eclient.Client).Interval(10*time.Second).Timeout(3*time.Minute).ForDaemonSetReadyByKey(ctx, dsKey)
			Expect(err).ToNot(HaveOccurred(), "failed to get the daemonset ready: %v", err)

-			klog.Infof("verify the rte pod on node %q is evicted", taintedNode.Name)
+			klog.InfoS("verify the rte pod on node is evicted", "node", taintedNode.Name)
			err = wait.With(fxt.Client).Timeout(2*time.Minute).ForPodDeleted(ctx, podOnNode.Namespace, podOnNode.Name)
			Expect(err).ToNot(HaveOccurred(), "pod %s/%s still exists", podOnNode.Namespace, podOnNode.Name)
		})
@@ -496,7 +496,7 @@ var _ = Describe("[serial][disruptive][rtetols] numaresources RTE tolerations su

			applyTaintToNode(ctx, fxt.Client, taintedNode, tnt)
			targetNodeNames = append(targetNodeNames, taintedNode.Name)
-			klog.Infof("considering node: %q tainted with %q", taintedNode.Name, tnt.String())
+			klog.InfoS("considering node tainted", "node", taintedNode.Name, "taint", tnt.String())

			By("trigger an RTE pod restart on the tainted node by deleting the pod")
			ds := appsv1.DaemonSet{}
@@ -517,7 +517,7 @@ var _ = Describe("[serial][disruptive][rtetols] numaresources RTE tolerations su
			}
			Expect(podToDelete.Name).NotTo(Equal(""), "RTE pod was not found on node %q", taintedNode.Name)

-			klog.Infof("delete the pod %s/%s of the tainted node", podToDelete.Namespace, podToDelete.Name)
+			klog.InfoS("delete the pod of the tainted node", "namespace", podToDelete.Namespace, "name", podToDelete.Name)
			err = fxt.Client.Delete(ctx, &podToDelete)
			Expect(err).ToNot(HaveOccurred())
			err = wait.With(fxt.Client).Timeout(2*time.Minute).ForPodDeleted(ctx, podToDelete.Namespace, podToDelete.Name)
@@ -594,7 +594,7 @@ var _ = Describe("[serial][disruptive][rtetols] numaresources RTE tolerations su

			applyTaintToNode(ctx, fxt.Client, taintedNode, tnt)
			targetNodeNames = append(targetNodeNames, taintedNode.Name)
-			klog.Infof("considering node: %q tainted with %q", taintedNode.Name, tnt.String())
+			klog.InfoS("considering node tainted", "node", taintedNode.Name, "taint", tnt.String())
		})

		AfterEach(func(ctx context.Context) {
@@ -729,7 +729,7 @@ var _ = Describe("[serial][disruptive][rtetols] numaresources RTE tolerations su

			applyTaintToNode(ctx, fxt.Client, taintedNode, tnt)
			targetNodeNames = append(targetNodeNames, taintedNode.Name)
-			klog.Infof("considering node: %q tainted with %q", taintedNode.Name, tnt.String())
+			klog.InfoS("considering node tainted", "node", taintedNode.Name, "taint", tnt.String())

			By(fmt.Sprintf("waiting for daemonset %v to report correct pods' number", dsKey.String()))
			updatedDs, err := wait.With(fxt.Client).Interval(time.Second).Timeout(time.Minute).ForDaemonsetPodsCreation(ctx, dsKey, len(workers)-1)
@@ -819,7 +819,7 @@ func isRTEPodFoundOnNode(cli client.Client, ctx context.Context, nodeName string
		if podLabels["name"] == "resource-topology" {
			found = true
			matchingPod = pod
-			klog.Infof("RTE pod is found: %s/%s", pod.Namespace, pod.Name)
+			klog.InfoS("RTE pod is found", "namespace", pod.Namespace, "name", pod.Name)
			break
		}
	}
@@ -930,6 +930,6 @@ func verifyUpdatedMCOnNodes(cli client.Client, ctx context.Context, node corev1.
		return false, fmt.Errorf("current mc mismatch for node %q", node.Name)
	}

-	klog.Infof("node %q is updated with mc %q", node.Name, desired)
+	klog.InfoS("node is updated with mc", "node", node.Name, "mc", desired)
	return true, nil
}

test/e2e/serial/tests/workload_placement_no_nrt.go

Lines changed: 4 additions & 4 deletions
@@ -168,14 +168,14 @@ var _ = Describe("[serial] numaresources profile update", Serial, Label("feature
})

func updateInfoRefreshPause(ctx context.Context, fxt *e2efixture.Fixture, newVal nropv1.InfoRefreshPauseMode, objForKey *nropv1.NUMAResourcesOperator) {
-	klog.Infof("update InfoRefreshPause to %q only if the existing one is different", newVal)
+	klog.InfoS("update InfoRefreshPause only if the existing one is different", "newValue", newVal)
	nroKey := client.ObjectKeyFromObject(objForKey)
	currentNrop := &nropv1.NUMAResourcesOperator{}
	err := fxt.Client.Get(ctx, nroKey, currentNrop)
	Expect(err).ToNot(HaveOccurred())
	currentVal := *currentNrop.Status.MachineConfigPools[0].Config.InfoRefreshPause
	if currentVal == newVal {
-		klog.Infof("profile already has the updated InfoRefreshPause: %s=%s", currentVal, newVal)
+		klog.InfoS("profile already has the updated InfoRefreshPause", "currentValue", currentVal, "newValue", newVal)
		return
	}

@@ -211,7 +211,7 @@ func updateInfoRefreshPause(ctx context.Context, fxt *e2efixture.Fixture, newVal
		currentMode = *updatedObj.Status.MachineConfigPools[0].Config.InfoRefreshPause
	}
	if currentMode != newVal {
-		klog.Infof("resource status is not updated yet: expected %q found %q", newVal, currentMode)
+		klog.InfoS("resource status is not updated yet", "expected", newVal, "found", currentMode)
		return false
	}

@@ -223,7 +223,7 @@ func updateInfoRefreshPause(ctx context.Context, fxt *e2efixture.Fixture, newVal
}

func waitForDaemonSetUpdate(ctx context.Context, fxt *e2efixture.Fixture, dsNsName nropv1.NamespacedName, rtePods []corev1.Pod) {
-	klog.Infof(fmt.Sprintf("ensure old RTE pods of ds %q are deleted", dsNsName))
+	klog.InfoS("ensure old RTE pods of ds are deleted", "daemonset", dsNsName)
	err := wait.With(fxt.Client).Interval(30*time.Second).Timeout(5*time.Minute).ForPodListAllDeleted(ctx, rtePods)
	Expect(err).ToNot(HaveOccurred(), "Expected old RTE pods owned by the DaemonSet to be deleted within the timeout")
test/e2e/serial/tests/workload_placement_taint.go

Lines changed: 6 additions & 6 deletions
@@ -75,15 +75,15 @@ var _ = Describe("[serial][disruptive][scheduler] numaresources workload placeme
		if len(nrtCandidates) < 2 {
			e2efixture.Skipf(fxt, "not enough nodes with 2 NUMA Zones: found %d", len(nrtCandidates))
		}
-		klog.Infof("Found node with 2 NUMA zones: %d", len(nrtCandidates))
+		klog.InfoS("Found node with 2 NUMA zones", "count", len(nrtCandidates))

		// we're ok with any TM policy as long as the updater can handle it,
		// we use this as proxy for "there is valid NRT data for at least X nodes
		nrts = e2enrt.FilterByTopologyManagerPolicy(nrtCandidates, intnrt.SingleNUMANode)
		if len(nrts) < 2 {
			e2efixture.Skipf(fxt, "not enough nodes with valid policy - found %d", len(nrts))
		}
-		klog.Infof("Found node with 2 NUMA zones: %d", len(nrts))
+		klog.InfoS("Found node with 2 NUMA zones", "count", len(nrts))

		// Note that this test, being part of "serial", expects NO OTHER POD being scheduled
		// in between, so we consider this information current and valid when the It()s run.
@@ -175,7 +175,7 @@ var _ = Describe("[serial][disruptive][scheduler] numaresources workload placeme
		targetNodeName, ok := e2efixture.PopNodeName(targetNodeNameSet)
		Expect(ok).To(BeTrue())

-		klog.Infof("target node will be %q", targetNodeName)
+		klog.InfoS("target node will be", "node", targetNodeName)

		var nrtInitial nrtv1alpha2.NodeResourceTopology
		err := fxt.Client.Get(context.TODO(), client.ObjectKey{Name: targetNodeName}, &nrtInitial)
@@ -291,7 +291,7 @@ func untaintNodes(cli client.Client, taintedNodeNames []string, taint *corev1.Ta
			return nil
		}

-		klog.Infof("removing taint: %q from node: %q", taint.String(), updatedNode.Name)
+		klog.InfoS("removing taint from node", "taint", taint.String(), "node", updatedNode.Name)
		err = cli.Update(context.TODO(), updatedNode)
		if err != nil {
			return err
@@ -342,12 +342,12 @@ func applyTaintToNode(ctx context.Context, cli client.Client, targetNode *corev1
			return nil
		}

-		klog.Infof("adding taint: %q to node: %q", tnt.String(), updatedNode.Name)
+		klog.InfoS("adding taint to node", "taint", tnt.String(), "node", updatedNode.Name)
		err = cli.Update(ctx, updatedNode)
		if err != nil {
			return err
		}
-		klog.Infof("added taint: %q to node: %q", tnt.String(), updatedNode.Name)
+		klog.InfoS("added taint to node", "taint", tnt.String(), "node", updatedNode.Name)
		return nil
	}).WithPolling(1 * time.Second).WithTimeout(1 * time.Minute).Should(Succeed())
	return updatedNode

test/e2e/tools/mkginkgolabelfilter_test.go

Lines changed: 1 addition & 1 deletion
@@ -64,7 +64,7 @@ var _ = Describe("[tools][mkginkgolabelfilter] Auxiliary tools", Label("tools",
	}
	expectExecutableExists(cmdline[0])
	for _, tc := range testcases {
-		klog.Infof("running %q\n", tc.name)
+		klog.InfoS("running", "testCase", tc.name)
		buffer := bytes.Buffer{}
		toolCmd := exec.Command(cmdline[0])
		_, err := buffer.Write(append([]byte(tc.input), "\n"...))

test/e2e/uninstall/uninstall_test.go

Lines changed: 3 additions & 3 deletions
@@ -76,12 +76,12 @@ var _ = Describe("[Uninstall] clusterCleanup", Serial, func() {
		Expect(err).NotTo(HaveOccurred())

		if err := e2eclient.Client.Delete(context.TODO(), nroObj); err != nil {
-			klog.Infof("failed to delete the numaresourcesoperators %q", nroObj.Name)
+			klog.InfoS("failed to delete the numaresourcesoperators", "name", nroObj.Name)
			return
		}

		if err := e2eclient.Client.Delete(context.TODO(), kcObj); err != nil && !errors.IsNotFound(err) {
-			klog.Infof("failed to delete the kubeletconfigs %q", kcObj.Name)
+			klog.InfoS("failed to delete the kubeletconfigs", "name", kcObj.Name)
		}

		timeout := configuration.MachineConfigPoolUpdateTimeout // shortcut
@@ -91,7 +91,7 @@ var _ = Describe("[Uninstall] clusterCleanup", Serial, func() {
		if configuration.Plat == platform.Kubernetes {
			mcpObj := objects.TestMCP()
			if err := e2eclient.Client.Delete(context.TODO(), mcpObj); err != nil {
-				klog.Infof("failed to delete the machine config pool %q", mcpObj.Name)
+				klog.InfoS("failed to delete the machine config pool", "name", mcpObj.Name)
			}
		}
