Skip to content

Commit aaf855c

Browse files
committed
Dereference the return value of metav1.NewDeleteOptions in all calls that pass it to clients.
This is gross, but because NewDeleteOptions is used by various parts of storage that still pass around pointers, the return type can't be changed without significant refactoring within the apiserver. I think this would be good to clean up, but I want to minimize apiserver-side changes as much as possible in the client signature refactor.
1 parent c58e69e commit aaf855c

33 files changed

+76
-76
lines changed

pkg/controller/podgc/gc_controller.go

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -76,7 +76,7 @@ func NewPodGC(kubeClient clientset.Interface, podInformer coreinformers.PodInfor
7676
nodeQueue: workqueue.NewNamedDelayingQueue("orphaned_pods_nodes"),
7777
deletePod: func(namespace, name string) error {
7878
klog.Infof("PodGC is force deleting Pod: %v/%v", namespace, name)
79-
return kubeClient.CoreV1().Pods(namespace).Delete(context.TODO(), name, metav1.NewDeleteOptions(0))
79+
return kubeClient.CoreV1().Pods(namespace).Delete(context.TODO(), name, *metav1.NewDeleteOptions(0))
8080
},
8181
}
8282

test/e2e/apimachinery/etcd_failure.go

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -125,7 +125,7 @@ func checkExistingRCRecovers(f *framework.Framework) {
125125
return false, nil
126126
}
127127
for _, pod := range pods.Items {
128-
err = podClient.Delete(context.TODO(), pod.Name, metav1.NewDeleteOptions(0))
128+
err = podClient.Delete(context.TODO(), pod.Name, *metav1.NewDeleteOptions(0))
129129
framework.ExpectNoError(err, "failed to delete pod %s in namespace: %s", pod.Name, f.Namespace.Name)
130130
}
131131
framework.Logf("apiserver has recovered")

test/e2e/apimachinery/generated_clientset.go

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -151,7 +151,7 @@ var _ = SIGDescribe("Generated clientset", func() {
151151

152152
ginkgo.By("deleting the pod gracefully")
153153
gracePeriod := int64(31)
154-
if err := podClient.Delete(context.TODO(), pod.Name, metav1.NewDeleteOptions(gracePeriod)); err != nil {
154+
if err := podClient.Delete(context.TODO(), pod.Name, *metav1.NewDeleteOptions(gracePeriod)); err != nil {
155155
framework.Failf("Failed to delete pod: %v", err)
156156
}
157157

test/e2e/apimachinery/resource_quota.go

Lines changed: 18 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -274,7 +274,7 @@ var _ = SIGDescribe("ResourceQuota", func() {
274274
framework.ExpectNoError(err)
275275

276276
ginkgo.By("Deleting the pod")
277-
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), podName, metav1.NewDeleteOptions(0))
277+
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), podName, *metav1.NewDeleteOptions(0))
278278
framework.ExpectNoError(err)
279279

280280
ginkgo.By("Ensuring resource quota status released the pod usage")
@@ -712,7 +712,7 @@ var _ = SIGDescribe("ResourceQuota", func() {
712712
framework.ExpectNoError(err)
713713

714714
ginkgo.By("Deleting the pod")
715-
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), podName, metav1.NewDeleteOptions(0))
715+
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), podName, *metav1.NewDeleteOptions(0))
716716
framework.ExpectNoError(err)
717717

718718
ginkgo.By("Ensuring resource quota status released the pod usage")
@@ -751,7 +751,7 @@ var _ = SIGDescribe("ResourceQuota", func() {
751751
framework.ExpectNoError(err)
752752

753753
ginkgo.By("Deleting the pod")
754-
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), podName, metav1.NewDeleteOptions(0))
754+
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), podName, *metav1.NewDeleteOptions(0))
755755
framework.ExpectNoError(err)
756756

757757
ginkgo.By("Ensuring resource quota status released the pod usage")
@@ -808,7 +808,7 @@ var _ = SIGDescribe("ResourceQuota", func() {
808808
framework.ExpectNoError(err)
809809

810810
ginkgo.By("Deleting the pod")
811-
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod.Name, metav1.NewDeleteOptions(0))
811+
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod.Name, *metav1.NewDeleteOptions(0))
812812
framework.ExpectNoError(err)
813813

814814
ginkgo.By("Ensuring resource quota status released the pod usage")
@@ -838,7 +838,7 @@ var _ = SIGDescribe("ResourceQuota", func() {
838838
framework.ExpectNoError(err)
839839

840840
ginkgo.By("Deleting the pod")
841-
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod.Name, metav1.NewDeleteOptions(0))
841+
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod.Name, *metav1.NewDeleteOptions(0))
842842
framework.ExpectNoError(err)
843843

844844
ginkgo.By("Ensuring resource quota status released the pod usage")
@@ -938,7 +938,7 @@ var _ = SIGDescribe("ResourceQuota [Feature:ScopeSelectors]", func() {
938938
framework.ExpectNoError(err)
939939

940940
ginkgo.By("Deleting the pod")
941-
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod.Name, metav1.NewDeleteOptions(0))
941+
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod.Name, *metav1.NewDeleteOptions(0))
942942
framework.ExpectNoError(err)
943943

944944
ginkgo.By("Ensuring resource quota status released the pod usage")
@@ -968,7 +968,7 @@ var _ = SIGDescribe("ResourceQuota [Feature:ScopeSelectors]", func() {
968968
framework.ExpectNoError(err)
969969

970970
ginkgo.By("Deleting the pod")
971-
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod.Name, metav1.NewDeleteOptions(0))
971+
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod.Name, *metav1.NewDeleteOptions(0))
972972
framework.ExpectNoError(err)
973973

974974
ginkgo.By("Ensuring resource quota status released the pod usage")
@@ -1028,7 +1028,7 @@ var _ = SIGDescribe("ResourceQuota [Feature:ScopeSelectors]", func() {
10281028
framework.ExpectNoError(err)
10291029

10301030
ginkgo.By("Deleting the pod")
1031-
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), podName, metav1.NewDeleteOptions(0))
1031+
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), podName, *metav1.NewDeleteOptions(0))
10321032
framework.ExpectNoError(err)
10331033

10341034
ginkgo.By("Ensuring resource quota status released the pod usage")
@@ -1067,7 +1067,7 @@ var _ = SIGDescribe("ResourceQuota [Feature:ScopeSelectors]", func() {
10671067
framework.ExpectNoError(err)
10681068

10691069
ginkgo.By("Deleting the pod")
1070-
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), podName, metav1.NewDeleteOptions(0))
1070+
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), podName, *metav1.NewDeleteOptions(0))
10711071
framework.ExpectNoError(err)
10721072

10731073
ginkgo.By("Ensuring resource quota status released the pod usage")
@@ -1114,7 +1114,7 @@ var _ = SIGDescribe("ResourceQuota [Feature:PodPriority]", func() {
11141114
framework.ExpectNoError(err)
11151115

11161116
ginkgo.By("Deleting the pod")
1117-
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod.Name, metav1.NewDeleteOptions(0))
1117+
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod.Name, *metav1.NewDeleteOptions(0))
11181118
framework.ExpectNoError(err)
11191119

11201120
ginkgo.By("Ensuring resource quota status released the pod usage")
@@ -1159,7 +1159,7 @@ var _ = SIGDescribe("ResourceQuota [Feature:PodPriority]", func() {
11591159
framework.ExpectError(err)
11601160

11611161
ginkgo.By("Deleting first pod")
1162-
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod.Name, metav1.NewDeleteOptions(0))
1162+
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod.Name, *metav1.NewDeleteOptions(0))
11631163
framework.ExpectNoError(err)
11641164

11651165
ginkgo.By("Ensuring resource quota status released the pod usage")
@@ -1209,9 +1209,9 @@ var _ = SIGDescribe("ResourceQuota [Feature:PodPriority]", func() {
12091209
framework.ExpectNoError(err)
12101210

12111211
ginkgo.By("Deleting both pods")
1212-
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod.Name, metav1.NewDeleteOptions(0))
1212+
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod.Name, *metav1.NewDeleteOptions(0))
12131213
framework.ExpectNoError(err)
1214-
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod2.Name, metav1.NewDeleteOptions(0))
1214+
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod2.Name, *metav1.NewDeleteOptions(0))
12151215
framework.ExpectNoError(err)
12161216
})
12171217

@@ -1258,9 +1258,9 @@ var _ = SIGDescribe("ResourceQuota [Feature:PodPriority]", func() {
12581258
framework.ExpectNoError(err)
12591259

12601260
ginkgo.By("Deleting both pods")
1261-
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod.Name, metav1.NewDeleteOptions(0))
1261+
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod.Name, *metav1.NewDeleteOptions(0))
12621262
framework.ExpectNoError(err)
1263-
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod2.Name, metav1.NewDeleteOptions(0))
1263+
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod2.Name, *metav1.NewDeleteOptions(0))
12641264
framework.ExpectNoError(err)
12651265

12661266
ginkgo.By("Ensuring resource quota status released the pod usage")
@@ -1299,7 +1299,7 @@ var _ = SIGDescribe("ResourceQuota [Feature:PodPriority]", func() {
12991299
framework.ExpectNoError(err)
13001300

13011301
ginkgo.By("Deleting the pod")
1302-
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod.Name, metav1.NewDeleteOptions(0))
1302+
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod.Name, *metav1.NewDeleteOptions(0))
13031303
framework.ExpectNoError(err)
13041304
})
13051305

@@ -1333,7 +1333,7 @@ var _ = SIGDescribe("ResourceQuota [Feature:PodPriority]", func() {
13331333
framework.ExpectNoError(err)
13341334

13351335
ginkgo.By("Deleting the pod")
1336-
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod.Name, metav1.NewDeleteOptions(0))
1336+
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod.Name, *metav1.NewDeleteOptions(0))
13371337
framework.ExpectNoError(err)
13381338

13391339
ginkgo.By("Ensuring resource quota status released the pod usage")
@@ -1391,7 +1391,7 @@ var _ = SIGDescribe("ResourceQuota [Feature:PodPriority]", func() {
13911391
framework.ExpectNoError(err)
13921392

13931393
ginkgo.By("Deleting the pod")
1394-
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod.Name, metav1.NewDeleteOptions(0))
1394+
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod.Name, *metav1.NewDeleteOptions(0))
13951395
framework.ExpectNoError(err)
13961396

13971397
ginkgo.By("Ensuring resource quota status released the pod usage")

test/e2e/apps/statefulset.go

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -763,7 +763,7 @@ var _ = SIGDescribe("StatefulSet", func() {
763763
}
764764

765765
ginkgo.By("Removing pod with conflicting port in namespace " + f.Namespace.Name)
766-
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod.Name, metav1.NewDeleteOptions(0))
766+
err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod.Name, *metav1.NewDeleteOptions(0))
767767
framework.ExpectNoError(err)
768768

769769
ginkgo.By("Waiting when stateful pod " + statefulPodName + " will be recreated in namespace " + f.Namespace.Name + " and will be in running state")

test/e2e/autoscaling/cluster_size_autoscaling.go

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1451,7 +1451,7 @@ func drainNode(f *framework.Framework, node *v1.Node) {
14511451
pods, err := f.ClientSet.CoreV1().Pods(metav1.NamespaceAll).List(context.TODO(), podOpts)
14521452
framework.ExpectNoError(err)
14531453
for _, pod := range pods.Items {
1454-
err = f.ClientSet.CoreV1().Pods(pod.Namespace).Delete(context.TODO(), pod.Name, metav1.NewDeleteOptions(0))
1454+
err = f.ClientSet.CoreV1().Pods(pod.Namespace).Delete(context.TODO(), pod.Name, *metav1.NewDeleteOptions(0))
14551455
framework.ExpectNoError(err)
14561456
}
14571457
}

test/e2e/common/container.go

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -70,7 +70,7 @@ func (cc *ConformanceContainer) Create() {
7070
}
7171

7272
func (cc *ConformanceContainer) Delete() error {
73-
return cc.PodClient.Delete(context.TODO(), cc.podName, metav1.NewDeleteOptions(0))
73+
return cc.PodClient.Delete(context.TODO(), cc.podName, *metav1.NewDeleteOptions(0))
7474
}
7575

7676
func (cc *ConformanceContainer) IsReady() (bool, error) {

test/e2e/common/container_probe.go

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -415,7 +415,7 @@ func RunLivenessTest(f *framework.Framework, pod *v1.Pod, expectNumRestarts int,
415415
// At the end of the test, clean up by removing the pod.
416416
defer func() {
417417
ginkgo.By("deleting the pod")
418-
podClient.Delete(context.TODO(), pod.Name, metav1.NewDeleteOptions(0))
418+
podClient.Delete(context.TODO(), pod.Name, *metav1.NewDeleteOptions(0))
419419
}()
420420
ginkgo.By(fmt.Sprintf("Creating pod %s in namespace %s", pod.Name, ns))
421421
podClient.Create(pod)

test/e2e/common/lifecycle_hook.go

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -82,7 +82,7 @@ var _ = framework.KubeDescribe("Container Lifecycle Hook", func() {
8282
}, postStartWaitTimeout, podCheckInterval).Should(gomega.BeNil())
8383
}
8484
ginkgo.By("delete the pod with lifecycle hook")
85-
podClient.DeleteSync(podWithHook.Name, metav1.NewDeleteOptions(15), framework.DefaultPodDeletionTimeout)
85+
podClient.DeleteSync(podWithHook.Name, *metav1.NewDeleteOptions(15), framework.DefaultPodDeletionTimeout)
8686
if podWithHook.Spec.Containers[0].Lifecycle.PreStop != nil {
8787
ginkgo.By("check prestop hook")
8888
gomega.Eventually(func() error {

test/e2e/common/pods.go

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -296,7 +296,7 @@ var _ = framework.KubeDescribe("Pods", func() {
296296
framework.ExpectNoError(err, "failed to GET scheduled pod")
297297

298298
ginkgo.By("deleting the pod gracefully")
299-
err = podClient.Delete(context.TODO(), pod.Name, metav1.NewDeleteOptions(30))
299+
err = podClient.Delete(context.TODO(), pod.Name, *metav1.NewDeleteOptions(30))
300300
framework.ExpectNoError(err, "failed to delete pod")
301301

302302
ginkgo.By("verifying the kubelet observed the termination notice")

0 commit comments

Comments
 (0)