
Commit e314ec2

Merge pull request kubernetes#120998 from kannon92/job-ptr-update
convert pointer to ptr for job controller
2 parents f19b62f + b96a074 commit e314ec2
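
For orientation, the change is mechanical: the type-specific helpers from k8s.io/utils/pointer are replaced by the generic helpers from k8s.io/utils/ptr throughout the job controller. A minimal, illustrative sketch of the mapping (a standalone example, not code taken from this diff):

package main

import (
    "fmt"

    "k8s.io/utils/ptr"
)

func main() {
    // pointer.Int32(2) -> ptr.To[int32](2); the explicit type parameter keeps
    // an untyped literal from being inferred as *int.
    completions := ptr.To[int32](2)

    // pointer.Int64Deref(p, 0) -> ptr.Deref(p, 0): dereference, or fall back to the default when nil.
    var grace *int64
    fmt.Println(ptr.Deref(grace, 0)) // 0

    // pointer.Int32Equal(a, b) -> ptr.Equal(a, b): nil-safe comparison by value.
    fmt.Println(ptr.Equal(completions, ptr.To[int32](2))) // true
}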

6 files changed: +214 -214 lines changed


pkg/controller/job/backoff_utils.go

Lines changed: 2 additions & 2 deletions
@@ -26,7 +26,7 @@ import (
     "k8s.io/klog/v2"
     apipod "k8s.io/kubernetes/pkg/api/v1/pod"
     "k8s.io/utils/clock"
-    "k8s.io/utils/pointer"
+    "k8s.io/utils/ptr"
 )

 type backoffRecord struct {
@@ -207,7 +207,7 @@ func getFinishTimeFromPodReadyFalseCondition(p *v1.Pod) *time.Time {

 func getFinishTimeFromDeletionTimestamp(p *v1.Pod) *time.Time {
     if p.DeletionTimestamp != nil {
-        finishTime := p.DeletionTimestamp.Time.Add(-time.Duration(pointer.Int64Deref(p.DeletionGracePeriodSeconds, 0)) * time.Second)
+        finishTime := p.DeletionTimestamp.Time.Add(-time.Duration(ptr.Deref(p.DeletionGracePeriodSeconds, 0)) * time.Second)
         return &finishTime
     }
     return nil
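
The changed line computes a pod's finish time by subtracting its deletion grace period from DeletionTimestamp, with ptr.Deref supplying 0 when the grace period is nil. A standalone sketch of that arithmetic (finishTime below is a hypothetical helper mirroring the expression, not the controller's function):

package main

import (
    "fmt"
    "time"

    "k8s.io/utils/ptr"
)

// finishTime mirrors the changed expression: treat a nil grace period as 0 seconds.
func finishTime(deletionTime time.Time, gracePeriodSeconds *int64) time.Time {
    return deletionTime.Add(-time.Duration(ptr.Deref(gracePeriodSeconds, 0)) * time.Second)
}

func main() {
    now := time.Now()
    fmt.Println(finishTime(now, nil).Equal(now))             // true: nil grace period defaults to 0s
    fmt.Println(now.Sub(finishTime(now, ptr.To[int64](30)))) // 30s
}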

pkg/controller/job/backoff_utils_test.go

Lines changed: 2 additions & 2 deletions
@@ -25,7 +25,7 @@ import (
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/klog/v2/ktesting"
     clocktesting "k8s.io/utils/clock/testing"
-    "k8s.io/utils/pointer"
+    "k8s.io/utils/ptr"
 )

 func TestNewBackoffRecord(t *testing.T) {
@@ -287,7 +287,7 @@ func TestGetFinishedTime(t *testing.T) {
             },
             ObjectMeta: metav1.ObjectMeta{
                 DeletionTimestamp: &metav1.Time{Time: defaultTestTime},
-                DeletionGracePeriodSeconds: pointer.Int64(30),
+                DeletionGracePeriodSeconds: ptr.To[int64](30),
             },
         },
         wantFinishTime: defaultTestTimeMinus30s,

pkg/controller/job/indexed_job_utils_test.go

Lines changed: 41 additions & 41 deletions
@@ -31,7 +31,7 @@ import (
     "k8s.io/klog/v2/ktesting"
     "k8s.io/kubernetes/pkg/controller"
     "k8s.io/kubernetes/pkg/features"
-    "k8s.io/utils/pointer"
+    "k8s.io/utils/ptr"
 )

 const noIndex = "-"
@@ -209,7 +209,7 @@ func TestCalculateSucceededIndexes(t *testing.T) {
             CompletedIndexes: tc.prevSucceeded,
         },
         Spec: batch.JobSpec{
-            Completions: pointer.Int32(tc.completions),
+            Completions: ptr.To(tc.completions),
         },
     }
     pods := hollowPodsWithIndexPhase(tc.pods)
@@ -238,8 +238,8 @@ func TestIsIndexFailed(t *testing.T) {
     "failed pod exceeding backoffLimitPerIndex, when backoffLimitPerIndex=0": {
         job: batch.Job{
             Spec: batch.JobSpec{
-                Completions: pointer.Int32(2),
-                BackoffLimitPerIndex: pointer.Int32(0),
+                Completions: ptr.To[int32](2),
+                BackoffLimitPerIndex: ptr.To[int32](0),
             },
         },
         pod: buildPod().indexFailureCount("0").phase(v1.PodFailed).index("0").trackingFinalizer().Pod,
@@ -248,8 +248,8 @@ func TestIsIndexFailed(t *testing.T) {
     "failed pod exceeding backoffLimitPerIndex, when backoffLimitPerIndex=1": {
         job: batch.Job{
             Spec: batch.JobSpec{
-                Completions: pointer.Int32(2),
-                BackoffLimitPerIndex: pointer.Int32(1),
+                Completions: ptr.To[int32](2),
+                BackoffLimitPerIndex: ptr.To[int32](1),
             },
         },
         pod: buildPod().indexFailureCount("1").phase(v1.PodFailed).index("1").trackingFinalizer().Pod,
@@ -259,8 +259,8 @@ func TestIsIndexFailed(t *testing.T) {
         enableJobPodFailurePolicy: true,
         job: batch.Job{
             Spec: batch.JobSpec{
-                Completions: pointer.Int32(2),
-                BackoffLimitPerIndex: pointer.Int32(1),
+                Completions: ptr.To[int32](2),
+                BackoffLimitPerIndex: ptr.To[int32](1),
                 PodFailurePolicy: &batch.PodFailurePolicy{
                     Rules: []batch.PodFailurePolicyRule{
                         {
@@ -292,8 +292,8 @@ func TestIsIndexFailed(t *testing.T) {
         enableJobPodFailurePolicy: false,
         job: batch.Job{
             Spec: batch.JobSpec{
-                Completions: pointer.Int32(2),
-                BackoffLimitPerIndex: pointer.Int32(1),
+                Completions: ptr.To[int32](2),
+                BackoffLimitPerIndex: ptr.To[int32](1),
                 PodFailurePolicy: &batch.PodFailurePolicy{
                     Rules: []batch.PodFailurePolicyRule{
                         {
@@ -346,8 +346,8 @@ func TestCalculateFailedIndexes(t *testing.T) {
     "one new index failed": {
         job: batch.Job{
             Spec: batch.JobSpec{
-                Completions: pointer.Int32(2),
-                BackoffLimitPerIndex: pointer.Int32(1),
+                Completions: ptr.To[int32](2),
+                BackoffLimitPerIndex: ptr.To[int32](1),
             },
         },
         pods: []*v1.Pod{
@@ -359,8 +359,8 @@ func TestCalculateFailedIndexes(t *testing.T) {
     "pod without finalizer is ignored": {
         job: batch.Job{
             Spec: batch.JobSpec{
-                Completions: pointer.Int32(2),
-                BackoffLimitPerIndex: pointer.Int32(0),
+                Completions: ptr.To[int32](2),
+                BackoffLimitPerIndex: ptr.To[int32](0),
             },
         },
         pods: []*v1.Pod{
@@ -371,8 +371,8 @@ func TestCalculateFailedIndexes(t *testing.T) {
     "pod outside completions is ignored": {
         job: batch.Job{
             Spec: batch.JobSpec{
-                Completions: pointer.Int32(2),
-                BackoffLimitPerIndex: pointer.Int32(0),
+                Completions: ptr.To[int32](2),
+                BackoffLimitPerIndex: ptr.To[int32](0),
             },
         },
         pods: []*v1.Pod{
@@ -383,11 +383,11 @@ func TestCalculateFailedIndexes(t *testing.T) {
     "extend the failed indexes": {
         job: batch.Job{
             Status: batch.JobStatus{
-                FailedIndexes: pointer.String("0"),
+                FailedIndexes: ptr.To("0"),
             },
             Spec: batch.JobSpec{
-                Completions: pointer.Int32(2),
-                BackoffLimitPerIndex: pointer.Int32(0),
+                Completions: ptr.To[int32](2),
+                BackoffLimitPerIndex: ptr.To[int32](0),
             },
         },
         pods: []*v1.Pod{
@@ -398,11 +398,11 @@ func TestCalculateFailedIndexes(t *testing.T) {
     "prev failed indexes empty": {
         job: batch.Job{
             Status: batch.JobStatus{
-                FailedIndexes: pointer.String(""),
+                FailedIndexes: ptr.To(""),
             },
             Spec: batch.JobSpec{
-                Completions: pointer.Int32(2),
-                BackoffLimitPerIndex: pointer.Int32(0),
+                Completions: ptr.To[int32](2),
+                BackoffLimitPerIndex: ptr.To[int32](0),
             },
         },
         pods: []*v1.Pod{
@@ -413,11 +413,11 @@ func TestCalculateFailedIndexes(t *testing.T) {
     "prev failed indexes outside the completions": {
         job: batch.Job{
             Status: batch.JobStatus{
-                FailedIndexes: pointer.String("9"),
+                FailedIndexes: ptr.To("9"),
             },
             Spec: batch.JobSpec{
-                Completions: pointer.Int32(2),
-                BackoffLimitPerIndex: pointer.Int32(0),
+                Completions: ptr.To[int32](2),
+                BackoffLimitPerIndex: ptr.To[int32](0),
             },
         },
         pods: []*v1.Pod{
@@ -449,8 +449,8 @@ func TestGetPodsWithDelayedDeletionPerIndex(t *testing.T) {
     "failed pods are kept corresponding to non-failed indexes are kept": {
        job: batch.Job{
            Spec: batch.JobSpec{
-                Completions: pointer.Int32(3),
-                BackoffLimitPerIndex: pointer.Int32(1),
+                Completions: ptr.To[int32](3),
+                BackoffLimitPerIndex: ptr.To[int32](1),
            },
        },
        pods: []*v1.Pod{
@@ -463,8 +463,8 @@ func TestGetPodsWithDelayedDeletionPerIndex(t *testing.T) {
     "failed pod without finalizer; the pod's deletion is not delayed as it already started": {
        job: batch.Job{
            Spec: batch.JobSpec{
-                Completions: pointer.Int32(2),
-                BackoffLimitPerIndex: pointer.Int32(0),
+                Completions: ptr.To[int32](2),
+                BackoffLimitPerIndex: ptr.To[int32](0),
            },
        },
        pods: []*v1.Pod{
@@ -475,8 +475,8 @@ func TestGetPodsWithDelayedDeletionPerIndex(t *testing.T) {
     "failed pod with expected finalizer removal; the pod's deletion is not delayed as it already started": {
        job: batch.Job{
            Spec: batch.JobSpec{
-                Completions: pointer.Int32(2),
-                BackoffLimitPerIndex: pointer.Int32(0),
+                Completions: ptr.To[int32](2),
+                BackoffLimitPerIndex: ptr.To[int32](0),
            },
        },
        pods: []*v1.Pod{
@@ -488,8 +488,8 @@ func TestGetPodsWithDelayedDeletionPerIndex(t *testing.T) {
     "failed pod with index outside of completions; the pod's deletion is not delayed": {
        job: batch.Job{
            Spec: batch.JobSpec{
-                Completions: pointer.Int32(2),
-                BackoffLimitPerIndex: pointer.Int32(0),
+                Completions: ptr.To[int32](2),
+                BackoffLimitPerIndex: ptr.To[int32](0),
            },
        },
        pods: []*v1.Pod{
@@ -500,8 +500,8 @@ func TestGetPodsWithDelayedDeletionPerIndex(t *testing.T) {
     "failed pod for active index; the pod's deletion is not delayed as it is already replaced": {
        job: batch.Job{
            Spec: batch.JobSpec{
-                Completions: pointer.Int32(2),
-                BackoffLimitPerIndex: pointer.Int32(1),
+                Completions: ptr.To[int32](2),
+                BackoffLimitPerIndex: ptr.To[int32](1),
            },
        },
        pods: []*v1.Pod{
@@ -513,8 +513,8 @@ func TestGetPodsWithDelayedDeletionPerIndex(t *testing.T) {
     "failed pod for succeeded index; the pod's deletion is not delayed as it is already replaced": {
        job: batch.Job{
            Spec: batch.JobSpec{
-                Completions: pointer.Int32(2),
-                BackoffLimitPerIndex: pointer.Int32(1),
+                Completions: ptr.To[int32](2),
+                BackoffLimitPerIndex: ptr.To[int32](1),
            },
        },
        pods: []*v1.Pod{
@@ -526,8 +526,8 @@ func TestGetPodsWithDelayedDeletionPerIndex(t *testing.T) {
     "multiple failed pods for index with different failure count; only the pod with highest failure count is kept": {
        job: batch.Job{
            Spec: batch.JobSpec{
-                Completions: pointer.Int32(2),
-                BackoffLimitPerIndex: pointer.Int32(4),
+                Completions: ptr.To[int32](2),
+                BackoffLimitPerIndex: ptr.To[int32](4),
            },
        },
        pods: []*v1.Pod{
@@ -540,8 +540,8 @@ func TestGetPodsWithDelayedDeletionPerIndex(t *testing.T) {
     "multiple failed pods for index with different finish times; only the last failed pod is kept": {
        job: batch.Job{
            Spec: batch.JobSpec{
-                Completions: pointer.Int32(2),
-                BackoffLimitPerIndex: pointer.Int32(4),
+                Completions: ptr.To[int32](2),
+                BackoffLimitPerIndex: ptr.To[int32](4),
            },
        },
        pods: []*v1.Pod{
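
One detail visible in these test hunks: ptr.To infers its type parameter from the argument, so ptr.To(tc.completions) works for an int32 variable, while untyped literals need the explicit ptr.To[int32](2) form to avoid producing *int. A minimal sketch (the spec struct below is a stand-in for batch.JobSpec, not the real type):

package main

import "k8s.io/utils/ptr"

// spec stands in for batch.JobSpec, just to show the two ptr.To forms.
type spec struct {
    Completions          *int32
    BackoffLimitPerIndex *int32
}

func main() {
    var completions int32 = 2
    _ = spec{
        Completions:          ptr.To(completions), // type parameter inferred from the int32 variable
        BackoffLimitPerIndex: ptr.To[int32](0),    // explicit: an untyped 0 would infer *int
    }
}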

pkg/controller/job/job_controller.go

Lines changed: 8 additions & 8 deletions
@@ -53,7 +53,7 @@ import (
     "k8s.io/kubernetes/pkg/features"
     "k8s.io/utils/clock"
     "k8s.io/utils/integer"
-    "k8s.io/utils/pointer"
+    "k8s.io/utils/ptr"
 )

 // controllerKind contains the schema.GroupVersionKind for this controller type.
@@ -783,7 +783,7 @@ func (jm *Controller) syncJob(ctx context.Context, key string) (rErr error) {
     }
     var terminating *int32
     if feature.DefaultFeatureGate.Enabled(features.JobPodReplacementPolicy) {
-        terminating = pointer.Int32(controller.CountTerminatingPods(pods))
+        terminating = ptr.To(controller.CountTerminatingPods(pods))
     }
     jobCtx := &syncJobCtx{
         job: &job,
@@ -799,7 +799,7 @@ func (jm *Controller) syncJob(ctx context.Context, key string) (rErr error) {
     failed := job.Status.Failed + int32(nonIgnoredFailedPodsCount(jobCtx, newFailedPods)) + int32(len(jobCtx.uncounted.failed))
     var ready *int32
     if feature.DefaultFeatureGate.Enabled(features.JobReadyPods) {
-        ready = pointer.Int32(countReadyPods(jobCtx.activePods))
+        ready = ptr.To(countReadyPods(jobCtx.activePods))
     }

     // Job first start. Set StartTime only if the job is not in the suspended state.
@@ -918,11 +918,11 @@ func (jm *Controller) syncJob(ctx context.Context, key string) (rErr error) {
         }
     }

-    needsStatusUpdate := suspendCondChanged || active != job.Status.Active || !pointer.Int32Equal(ready, job.Status.Ready)
+    needsStatusUpdate := suspendCondChanged || active != job.Status.Active || !ptr.Equal(ready, job.Status.Ready)
     job.Status.Active = active
     job.Status.Ready = ready
     job.Status.Terminating = jobCtx.terminating
-    needsStatusUpdate = needsStatusUpdate || !pointer.Int32Equal(job.Status.Terminating, jobCtx.terminating)
+    needsStatusUpdate = needsStatusUpdate || !ptr.Equal(job.Status.Terminating, jobCtx.terminating)
     err = jm.trackJobStatusAndRemoveFinalizers(ctx, jobCtx, needsStatusUpdate)
     if err != nil {
         return fmt.Errorf("tracking status: %w", err)
@@ -1106,9 +1106,9 @@ func (jm *Controller) trackJobStatusAndRemoveFinalizers(ctx context.Context, job
     jobCtx.job.Status.CompletedIndexes = succeededIndexesStr
     var failedIndexesStr *string
     if jobCtx.failedIndexes != nil {
-        failedIndexesStr = pointer.String(jobCtx.failedIndexes.String())
+        failedIndexesStr = ptr.To(jobCtx.failedIndexes.String())
     }
-    if !pointer.StringEqual(jobCtx.job.Status.FailedIndexes, failedIndexesStr) {
+    if !ptr.Equal(jobCtx.job.Status.FailedIndexes, failedIndexesStr) {
         jobCtx.job.Status.FailedIndexes = failedIndexesStr
         needsFlush = true
     }
@@ -1642,7 +1642,7 @@ func (jm *Controller) getPodCreationInfoForIndependentIndexes(logger klog.Logger
     if len(indexesToAddNow) > 0 {
         return indexesToAddNow, 0
     }
-    return indexesToAddNow, pointer.DurationDeref(minRemainingTimePerIndex, 0)
+    return indexesToAddNow, ptr.Deref(minRemainingTimePerIndex, 0)
 }

 // activePodsForRemoval returns Pods that should be removed because there
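
The needsStatusUpdate and FailedIndexes hunks above rely on ptr.Equal being nil-safe: two nil pointers compare equal, nil versus non-nil does not, and two non-nil pointers compare by dereferenced value. A quick sketch, assuming ptr.Equal behaves as documented in k8s.io/utils:

package main

import (
    "fmt"

    "k8s.io/utils/ptr"
)

func main() {
    var a, b *int32
    fmt.Println(ptr.Equal(a, b))                               // true: both nil
    fmt.Println(ptr.Equal(a, ptr.To[int32](1)))                // false: nil vs non-nil
    fmt.Println(ptr.Equal(ptr.To[int32](1), ptr.To[int32](1))) // true: equal values behind distinct pointers
}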
