Commit 76f8594

more artisanal fixes
Most of these could have been refactored automatically, but it would have been uglier. The unsophisticated tooling left lots of unnecessary struct -> pointer -> struct transitions.
1 parent aaf855c commit 76f8594
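To illustrate what the message means by struct -> pointer -> struct transitions, here is a minimal sketch (not code from this commit; the helper names are invented) of the shape a mechanical rewrite tends to leave behind versus the hand-edited form, now that the client-go Delete methods take metav1.DeleteOptions by value:

package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// deleteJobMechanical mimics the machine-generated shape: the options are
// built behind a pointer and then dereferenced again at the call site, a
// struct -> pointer -> struct round trip that adds nothing.
func deleteJobMechanical(client kubernetes.Interface, ns, name string) error {
	propagation := metav1.DeletePropagationForeground
	deleteOptions := &metav1.DeleteOptions{PropagationPolicy: &propagation}
	return client.BatchV1().Jobs(ns).Delete(context.TODO(), name, *deleteOptions)
}

// deleteJobByHand is the equivalent hand-edited form: the options literal is
// passed directly as a value, with no intermediate pointer.
func deleteJobByHand(client kubernetes.Interface, ns, name string) error {
	propagation := metav1.DeletePropagationForeground
	return client.BatchV1().Jobs(ns).Delete(context.TODO(), name,
		metav1.DeleteOptions{PropagationPolicy: &propagation})
}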

34 files changed, +94 -101 lines

cmd/kubeadm/app/phases/upgrade/health.go

Lines changed: 1 addition & 4 deletions
@@ -200,10 +200,7 @@ func createJob(client clientset.Interface, cfg *kubeadmapi.ClusterConfiguration)
 func deleteHealthCheckJob(client clientset.Interface, ns, jobName string) error {
 	klog.V(2).Infof("Deleting Job %q in the namespace %q", jobName, ns)
 	propagation := metav1.DeletePropagationForeground
-	deleteOptions := &metav1.DeleteOptions{
-		PropagationPolicy: &propagation,
-	}
-	if err := client.BatchV1().Jobs(ns).Delete(context.TODO(), jobName, deleteOptions); err != nil {
+	if err := client.BatchV1().Jobs(ns).Delete(context.TODO(), jobName, metav1.DeleteOptions{PropagationPolicy: &propagation}); err != nil {
 		return errors.Wrapf(err, "could not delete Job %q in the namespace %q", jobName, ns)
 	}
 	return nil

cmd/kubeadm/app/util/apiclient/idempotency.go

Lines changed: 2 additions & 8 deletions
@@ -194,19 +194,13 @@ func CreateOrUpdateDaemonSet(client clientset.Interface, ds *apps.DaemonSet) err
 // DeleteDaemonSetForeground deletes the specified DaemonSet in foreground mode; i.e. it blocks until/makes sure all the managed Pods are deleted
 func DeleteDaemonSetForeground(client clientset.Interface, namespace, name string) error {
 	foregroundDelete := metav1.DeletePropagationForeground
-	deleteOptions := &metav1.DeleteOptions{
-		PropagationPolicy: &foregroundDelete,
-	}
-	return client.AppsV1().DaemonSets(namespace).Delete(context.TODO(), name, deleteOptions)
+	return client.AppsV1().DaemonSets(namespace).Delete(context.TODO(), name, metav1.DeleteOptions{PropagationPolicy: &foregroundDelete})
 }

 // DeleteDeploymentForeground deletes the specified Deployment in foreground mode; i.e. it blocks until/makes sure all the managed Pods are deleted
 func DeleteDeploymentForeground(client clientset.Interface, namespace, name string) error {
 	foregroundDelete := metav1.DeletePropagationForeground
-	deleteOptions := &metav1.DeleteOptions{
-		PropagationPolicy: &foregroundDelete,
-	}
-	return client.AppsV1().Deployments(namespace).Delete(context.TODO(), name, deleteOptions)
+	return client.AppsV1().Deployments(namespace).Delete(context.TODO(), name, metav1.DeleteOptions{PropagationPolicy: &foregroundDelete})
 }

 // CreateOrUpdateRole creates a Role if the target resource doesn't exist. If the resource exists already, this function will update the resource instead.

pkg/controller/bootstrap/tokencleaner.go

Lines changed: 2 additions & 2 deletions
@@ -192,9 +192,9 @@ func (tc *TokenCleaner) evalSecret(o interface{}) {
 	ttl, alreadyExpired := bootstrapsecretutil.GetExpiration(secret, time.Now())
 	if alreadyExpired {
 		klog.V(3).Infof("Deleting expired secret %s/%s", secret.Namespace, secret.Name)
-		var options *metav1.DeleteOptions
+		var options metav1.DeleteOptions
 		if len(secret.UID) > 0 {
-			options = &metav1.DeleteOptions{Preconditions: &metav1.Preconditions{UID: &secret.UID}}
+			options.Preconditions = &metav1.Preconditions{UID: &secret.UID}
 		}
 		err := tc.client.CoreV1().Secrets(secret.Namespace).Delete(context.TODO(), secret.Name, options)
 		// NotFound isn't a real error (it's already been deleted)

pkg/controller/serviceaccount/tokens_controller.go

Lines changed: 5 additions & 5 deletions
@@ -342,9 +342,9 @@ func (e *TokensController) deleteTokens(serviceAccount *v1.ServiceAccount) ( /*r
 }

 func (e *TokensController) deleteToken(ns, name string, uid types.UID) ( /*retry*/ bool, error) {
-	var opts *metav1.DeleteOptions
+	var opts metav1.DeleteOptions
 	if len(uid) > 0 {
-		opts = &metav1.DeleteOptions{Preconditions: &metav1.Preconditions{UID: &uid}}
+		opts.Preconditions = &metav1.Preconditions{UID: &uid}
 	}
 	err := e.client.CoreV1().Secrets(ns).Delete(context.TODO(), name, opts)
 	// NotFound doesn't need a retry (it's already been deleted)
@@ -460,9 +460,9 @@ func (e *TokensController) ensureReferencedToken(serviceAccount *v1.ServiceAccou
 	if !addedReference {
 		// we weren't able to use the token, try to clean it up.
 		klog.V(2).Infof("deleting secret %s/%s because reference couldn't be added (%v)", secret.Namespace, secret.Name, err)
-		deleteOpts := &metav1.DeleteOptions{Preconditions: &metav1.Preconditions{UID: &createdToken.UID}}
-		if deleteErr := e.client.CoreV1().Secrets(createdToken.Namespace).Delete(context.TODO(), createdToken.Name, deleteOpts); deleteErr != nil {
-			klog.Error(deleteErr) // if we fail, just log it
+		deleteOpts := metav1.DeleteOptions{Preconditions: &metav1.Preconditions{UID: &createdToken.UID}}
+		if err := e.client.CoreV1().Secrets(createdToken.Namespace).Delete(context.TODO(), createdToken.Name, deleteOpts); err != nil {
+			klog.Error(err) // if we fail, just log it
 		}
 	}

pkg/controller/testutil/test_utils.go

Lines changed: 6 additions & 8 deletions
@@ -26,28 +26,26 @@ import (
 	"testing"
 	"time"

+	v1 "k8s.io/api/core/v1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/types"
+	"k8s.io/apimachinery/pkg/util/clock"
 	"k8s.io/apimachinery/pkg/util/sets"
 	"k8s.io/apimachinery/pkg/util/strategicpatch"
 	"k8s.io/apimachinery/pkg/watch"
-
-	"k8s.io/apimachinery/pkg/util/clock"
-	ref "k8s.io/client-go/tools/reference"
-
-	v1 "k8s.io/api/core/v1"
 	"k8s.io/client-go/kubernetes/fake"
 	v1core "k8s.io/client-go/kubernetes/typed/core/v1"
 	"k8s.io/client-go/tools/cache"
+	ref "k8s.io/client-go/tools/reference"
+	"k8s.io/klog"
 	"k8s.io/kubernetes/pkg/api/legacyscheme"
 	api "k8s.io/kubernetes/pkg/apis/core"
 	utilnode "k8s.io/kubernetes/pkg/util/node"

 	jsonpatch "github.com/evanphx/json-patch"
-	"k8s.io/klog"
 )

 var (
@@ -183,7 +181,7 @@ func (m *FakeNodeHandler) List(_ context.Context, opts metav1.ListOptions) (*v1.
 }

 // Delete deletes a Node from the fake store.
-func (m *FakeNodeHandler) Delete(_ context.Context, id string, opt *metav1.DeleteOptions) error {
+func (m *FakeNodeHandler) Delete(_ context.Context, id string, opt metav1.DeleteOptions) error {
 	m.lock.Lock()
 	defer func() {
 		m.RequestCount++
@@ -197,7 +195,7 @@ func (m *FakeNodeHandler) Delete(_ context.Context, id string, opt *metav1.Delet
 }

 // DeleteCollection deletes a collection of Nodes from the fake store.
-func (m *FakeNodeHandler) DeleteCollection(_ context.Context, opt *metav1.DeleteOptions, listOpts metav1.ListOptions) error {
+func (m *FakeNodeHandler) DeleteCollection(_ context.Context, opt metav1.DeleteOptions, listOpts metav1.ListOptions) error {
 	return nil
 }

pkg/controller/ttlafterfinished/ttlafterfinished_controller.go

Lines changed: 1 addition & 1 deletion
@@ -230,7 +230,7 @@ func (tc *Controller) processJob(key string) error {
 	}
 	// Cascade deletes the Jobs if TTL truly expires.
 	policy := metav1.DeletePropagationForeground
-	options := &metav1.DeleteOptions{
+	options := metav1.DeleteOptions{
 		PropagationPolicy: &policy,
 		Preconditions:     &metav1.Preconditions{UID: &fresh.UID},
 	}

pkg/kubelet/status/status_manager.go

Lines changed: 6 additions & 3 deletions
@@ -583,9 +583,12 @@ func (m *manager) syncPod(uid types.UID, status versionedPodStatus) {

 	// We don't handle graceful deletion of mirror pods.
 	if m.canBeDeleted(pod, status.status) {
-		deleteOptions := metav1.NewDeleteOptions(0)
-		// Use the pod UID as the precondition for deletion to prevent deleting a newly created pod with the same name and namespace.
-		deleteOptions.Preconditions = metav1.NewUIDPreconditions(string(pod.UID))
+		deleteOptions := metav1.DeleteOptions{
+			GracePeriodSeconds: new(int64),
+			// Use the pod UID as the precondition for deletion to prevent deleting a
+			// newly created pod with the same name and namespace.
+			Preconditions: metav1.NewUIDPreconditions(string(pod.UID)),
+		}
 		err = m.kubeClient.CoreV1().Pods(pod.Namespace).Delete(context.TODO(), pod.Name, deleteOptions)
 		if err != nil {
 			klog.Warningf("Failed to delete status for pod %q: %v", format.Pod(pod), err)
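An aside on the replacement above, as a minimal sketch with an invented helper name: GracePeriodSeconds: new(int64) is just a pointer to 0, so the new literal asks for the same immediate deletion that metav1.NewDeleteOptions(0) did, only as a DeleteOptions value instead of a *DeleteOptions.

package example

import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

// zeroGraceForms returns both spellings of "delete immediately": the value
// literal used after this commit and the pointer-returning helper used
// before it. new(int64) allocates an int64 initialized to 0.
func zeroGraceForms() (metav1.DeleteOptions, *metav1.DeleteOptions) {
	byValue := metav1.DeleteOptions{GracePeriodSeconds: new(int64)}
	byPointer := metav1.NewDeleteOptions(0)
	return byValue, byPointer
}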

staging/src/k8s.io/kube-aggregator/pkg/controllers/autoregister/autoregister_controller.go

Lines changed: 1 addition & 1 deletion
@@ -262,7 +262,7 @@ func (c *autoRegisterController) checkAPIService(name string) (err error) {

 	// we have a spurious APIService that we're managing, delete it (5A,6A)
 	case desired == nil:
-		opts := &metav1.DeleteOptions{Preconditions: metav1.NewUIDPreconditions(string(curr.UID))}
+		opts := metav1.DeleteOptions{Preconditions: metav1.NewUIDPreconditions(string(curr.UID))}
 		err := c.apiServiceClient.APIServices().Delete(context.TODO(), curr.Name, opts)
 		if apierrors.IsNotFound(err) || apierrors.IsConflict(err) {
 			// deleted or changed in the meantime, we'll get called again

staging/src/k8s.io/kubectl/pkg/drain/drain.go

Lines changed: 6 additions & 3 deletions
@@ -121,8 +121,8 @@ func CheckEvictionSupport(clientset kubernetes.Interface) (string, error) {
 	return "", nil
 }

-func (d *Helper) makeDeleteOptions() *metav1.DeleteOptions {
-	deleteOptions := &metav1.DeleteOptions{}
+func (d *Helper) makeDeleteOptions() metav1.DeleteOptions {
+	deleteOptions := metav1.DeleteOptions{}
 	if d.GracePeriodSeconds >= 0 {
 		gracePeriodSeconds := int64(d.GracePeriodSeconds)
 		deleteOptions.GracePeriodSeconds = &gracePeriodSeconds
@@ -150,6 +150,8 @@ func (d *Helper) EvictPod(pod corev1.Pod, policyGroupVersion string) error {
 			return err
 		}
 	}
+
+	delOpts := d.makeDeleteOptions()
 	eviction := &policyv1beta1.Eviction{
 		TypeMeta: metav1.TypeMeta{
 			APIVersion: policyGroupVersion,
@@ -159,8 +161,9 @@ func (d *Helper) EvictPod(pod corev1.Pod, policyGroupVersion string) error {
 			Name:      pod.Name,
 			Namespace: pod.Namespace,
 		},
-		DeleteOptions: d.makeDeleteOptions(),
+		DeleteOptions: &delOpts,
 	}
+
 	// Remember to change change the URL manipulation func when Eviction's version change
 	return d.Client.PolicyV1beta1().Evictions(eviction.Namespace).Evict(eviction)
 }
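The temporary delOpts variable here is not cosmetic: Eviction.DeleteOptions is still a *metav1.DeleteOptions while makeDeleteOptions now returns a value, and Go will not take the address of a function call's result, so the return value has to land in an addressable local first. A minimal sketch of that constraint, using a stand-in struct rather than the real policy/v1beta1 type:

package example

import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

// eviction stands in for policyv1beta1.Eviction, which keeps a pointer field.
type eviction struct {
	DeleteOptions *metav1.DeleteOptions
}

func makeDeleteOptions() metav1.DeleteOptions {
	return metav1.DeleteOptions{}
}

func buildEviction() eviction {
	// eviction{DeleteOptions: &makeDeleteOptions()} would not compile: only
	// addressable values can have their address taken, so the returned value
	// is stored in a local variable first.
	delOpts := makeDeleteOptions()
	return eviction{DeleteOptions: &delOpts}
}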

test/e2e/apimachinery/garbage_collector.go

Lines changed: 11 additions & 9 deletions
@@ -79,19 +79,19 @@ func estimateMaximumPods(c clientset.Interface, min, max int32) int32 {
 	return availablePods
 }

-func getForegroundOptions() *metav1.DeleteOptions {
+func getForegroundOptions() metav1.DeleteOptions {
 	policy := metav1.DeletePropagationForeground
-	return &metav1.DeleteOptions{PropagationPolicy: &policy}
+	return metav1.DeleteOptions{PropagationPolicy: &policy}
 }

-func getBackgroundOptions() *metav1.DeleteOptions {
+func getBackgroundOptions() metav1.DeleteOptions {
 	policy := metav1.DeletePropagationBackground
-	return &metav1.DeleteOptions{PropagationPolicy: &policy}
+	return metav1.DeleteOptions{PropagationPolicy: &policy}
 }

-func getOrphanOptions() *metav1.DeleteOptions {
+func getOrphanOptions() metav1.DeleteOptions {
 	policy := metav1.DeletePropagationOrphan
-	return &metav1.DeleteOptions{PropagationPolicy: &policy}
+	return metav1.DeleteOptions{PropagationPolicy: &policy}
 }

 var (
@@ -473,8 +473,9 @@ var _ = SIGDescribe("Garbage collector", func() {
 			framework.Failf("failed to wait for the rc.Status.Replicas to reach rc.Spec.Replicas: %v", err)
 		}
 		ginkgo.By("delete the rc")
-		deleteOptions := &metav1.DeleteOptions{}
-		deleteOptions.Preconditions = metav1.NewUIDPreconditions(string(rc.UID))
+		deleteOptions := metav1.DeleteOptions{
+			Preconditions: metav1.NewUIDPreconditions(string(rc.UID)),
+		}
 		if err := rcClient.Delete(context.TODO(), rc.ObjectMeta.Name, deleteOptions); err != nil {
 			framework.Failf("failed to delete the rc: %v", err)
 		}
@@ -1101,7 +1102,8 @@ var _ = SIGDescribe("Garbage collector", func() {
 		framework.Logf("created dependent resource %q", dependentName)

 		// Delete the owner and orphan the dependent.
-		err = resourceClient.Delete(ownerName, getOrphanOptions())
+		delOpts := getOrphanOptions()
+		err = resourceClient.Delete(ownerName, &delOpts)
 		if err != nil {
 			framework.Failf("failed to delete owner resource %q: %v", ownerName, err)
 		}
