
Commit 9c8611c

fix: lint errors reported after upgrading to golangci-lint v2.5.0 (#4478)
#### What type of PR is this?

Fix for failing CI after upgrading to golangci-lint v2.5.0.

#### What this PR does / why we need it:

From the golangci-lint v2.5.0 changelog:

* gofumpt: from 0.8.0 to 0.9.1 (new rule is to “clothe” naked returns for the sake of clarity)

The latest version of `gofumpt` reports naked returns (a bare `return` in a function with named result parameters) as lint issues, and there doesn't seem to be an option for disabling this rule. The options were:

1. Auto-fix the code using `golangci-lint run --fix`
2. Revert to golangci-lint v2.4.0
3. Disable the `gofumpt` linter

The code was auto-fixed by running `golangci-lint run --fix`.

---------

Signed-off-by: Dejan Zele Pejchev <pejcev.dejan@gmail.com>
Co-authored-by: JamesMurkin <jamesmurkin@hotmail.com>
1 parent: 4b13732 · commit: 9c8611c
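The rule in question flags a bare `return` inside a function whose signature names its result values. A minimal sketch of the before/after shape, using hypothetical names rather than code from this repository:

```go
package example

import "errors"

// Before: the bare "return" relies on the named result parameters.
// gofumpt 0.9.x reports this as a lint issue.
func parsePortBefore(s string) (port int, err error) {
	if s == "" {
		err = errors.New("empty port")
		return // naked return: flagged after the upgrade
	}
	port = 8080 // placeholder logic for the sketch
	return
}

// After: the same function with its returns "clothed", matching the
// auto-fix described above; behaviour is unchanged.
func parsePortAfter(s string) (port int, err error) {
	if s == "" {
		err = errors.New("empty port")
		return port, err
	}
	port = 8080
	return port, err
}
```

In several of the files below the commit goes a step further and removes the named results from the signature altogether, declaring ordinary local variables instead; either shape satisfies the new rule.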

File tree

6 files changed (+81, -56 lines)


internal/common/armadaerrors/errors.go

Lines changed: 9 additions & 8 deletions
@@ -41,7 +41,8 @@ type ErrUnauthorized struct {
 	Message string
 }

-func (err *ErrUnauthorized) Error() (s string) {
+func (err *ErrUnauthorized) Error() string {
+	var s string
 	if err.Action != "" {
 		s = fmt.Sprintf("%s lacks permission %s required for action %s", err.Principal, err.Permission, err.Action)
 	} else {
@@ -50,7 +51,7 @@ func (err *ErrUnauthorized) Error() (s string) {
 	if err.Message != "" {
 		s += fmt.Sprintf("; %s", err.Message)
 	}
-	return
+	return s
 }

 // ErrAlreadyExists is a generic error to be returned whenever some resource already exists.
@@ -496,15 +497,15 @@ func (err *ErrUnauthenticated) GRPCStatus() *status.Status {
 	return status.New(codes.Unauthenticated, err.Error())
 }

-func (err *ErrUnauthenticated) Error() (s string) {
-	s = "Request could not be authenticated"
+func (err *ErrUnauthenticated) Error() string {
+	s := "Request could not be authenticated"
 	if err.Action != "" {
 		s += fmt.Sprintf(" for action %q", err.Action)
 	}
 	if err.Message != "" {
 		s += fmt.Sprintf(": %s", err.Message)
 	}
-	return
+	return s
 }

 // ErrInvalidCredentials is returned when a given set of credentials cannot
@@ -590,8 +591,8 @@ func craftFullErrorMessageForAuthRelatedErrors(mainMessage string,
 	authServiceName string,
 	action string,
 	auxMessage string,
-) (s string) {
-	s = mainMessage
+) string {
+	s := mainMessage
 	if username != "" {
 		s += fmt.Sprintf(" for user %q", username)
 	}
@@ -604,5 +605,5 @@ func craftFullErrorMessageForAuthRelatedErrors(mainMessage string,
 	if auxMessage != "" {
 		s += fmt.Sprintf(": %s", auxMessage)
 	}
-	return
+	return s
 }

internal/scheduler/jobdb/reconciliation.go

Lines changed: 6 additions & 6 deletions
@@ -147,10 +147,10 @@ func markJobsAsPreemptionRequested(txn *Txn, jobIds []string, jsts map[string]Jo
 func (jobDb *JobDb) reconcileJobDifferences(job *Job, jobRepoJob *database.Job, jobRepoRuns []*database.Run) (jst JobStateTransitions, err error) {
 	defer func() { jst.Job = job }()
 	if job == nil && jobRepoJob == nil {
-		return
+		return jst, err
 	} else if job == nil && jobRepoJob != nil {
 		if job, err = jobDb.schedulerJobFromDatabaseJob(jobRepoJob); err != nil {
-			return
+			return jst, err
 		}
 		jst.Queued = true
 	} else if job != nil && jobRepoJob == nil {
@@ -210,13 +210,13 @@ func (jobDb *JobDb) reconcileJobDifferences(job *Job, jobRepoJob *database.Job,
 		job = job.WithUpdatedRun(rst.JobRun)
 	}

-	return
+	return jst, err
 }

 func (jobDb *JobDb) reconcileRunDifferences(jobRun *JobRun, jobRepoRun *database.Run) (rst RunStateTransitions) {
 	defer func() { rst.JobRun = jobRun }()
 	if jobRun == nil && jobRepoRun == nil {
-		return
+		return rst
 	} else if jobRun == nil && jobRepoRun != nil {
 		jobRun = jobDb.schedulerRunFromDatabaseRun(jobRepoRun)
 		rst.Returned = jobRepoRun.Returned
@@ -228,7 +228,7 @@ func (jobDb *JobDb) reconcileRunDifferences(jobRun *JobRun, jobRepoRun *database
 		rst.Failed = jobRepoRun.Failed
 		rst.Succeeded = jobRepoRun.Succeeded
 	} else if jobRun != nil && jobRepoRun == nil {
-		return
+		return rst
 	} else if jobRun != nil && jobRepoRun != nil {
 		if jobRepoRun.LeasedTimestamp != nil && !jobRun.Leased() {
 			jobRun = jobRun.WithLeased(true).WithLeasedTime(jobRepoRun.LeasedTimestamp)
@@ -271,7 +271,7 @@ func (jobDb *JobDb) reconcileRunDifferences(jobRun *JobRun, jobRepoRun *database
 		}
 	}
 	jobRun = jobDb.enforceTerminalStateExclusivity(jobRun, &rst)
-	return
+	return rst
 }

 // enforceTerminalStateExclusivity ensures that a job run has a single terminal state regardless of what the database reports.
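Note that, unlike some of the other files in this commit, these two functions keep their named result parameters (`jst JobStateTransitions, err error` and `rst RunStateTransitions`): a deferred closure assigns to them (`defer func() { jst.Job = job }()`), so the names must remain and only the `return` statements are clothed. A minimal sketch of that interaction, with hypothetical names rather than repository code:

```go
package example

// result stands in for a state-transition struct such as JobStateTransitions.
type result struct {
	Done bool
}

// finish keeps its named result because the deferred closure writes to it
// after the return statement executes; the caller still observes Done == done.
// Only the return itself is made explicit to satisfy the new gofumpt rule.
func finish(done bool) (res result) {
	defer func() { res.Done = done }() // runs after "return res", before finish actually returns
	return res
}
```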

internal/scheduler/nodedb/nodedb.go

Lines changed: 4 additions & 2 deletions
@@ -471,7 +471,9 @@ func (nodeDb *NodeDb) selectNodeForJobWithTxnAndAwayNodeType(
 	txn *memdb.Txn,
 	jctx *context.JobSchedulingContext,
 	awayNodeType types.AwayNodeType,
-) (node *internaltypes.Node, err error) {
+) (*internaltypes.Node, error) {
+	var node *internaltypes.Node
+	var err error
 	// Save the number of additional tolerations that the job originally had; we
 	// use this value to restore the slice of additional toleration at the end
 	// of each loop iteration.
@@ -497,7 +499,7 @@ func (nodeDb *NodeDb) selectNodeForJobWithTxnAndAwayNodeType(

 	jctx.PodSchedulingContext.ScheduledAtPriority = awayNodeType.Priority
 	node, err = nodeDb.selectNodeForJobWithTxnAtPriority(txn, jctx)
-	return
+	return node, err
 }

 func (nodeDb *NodeDb) selectNodeForJobWithTxnAtPriority(

internal/scheduler/scheduling/gang_scheduler.go

Lines changed: 23 additions & 14 deletions
@@ -101,7 +101,7 @@ func (sch *GangScheduler) Schedule(ctx *armadacontext.Context, gctx *context.Gan
 	// Exit immediately if this is a new gang and we've hit any round limits.
 	if !gctx.AllJobsEvicted {
 		if ok, unschedulableReason, err = sch.constraints.CheckRoundConstraints(sch.schedulingContext); err != nil || !ok {
-			return
+			return ok, unschedulableReason, err
 		}
 	}

@@ -130,26 +130,29 @@ func (sch *GangScheduler) Schedule(ctx *armadacontext.Context, gctx *context.Gan
 	}()

 	if _, err = sch.schedulingContext.AddGangSchedulingContext(gctx); err != nil {
-		return
+		return ok, unschedulableReason, err
 	}
 	gangAddedToSchedulingContext = true
 	if !gctx.AllJobsEvicted {
 		// Only perform these checks for new jobs to avoid preempting jobs if, e.g., MinimumJobSize changes.
 		if ok, unschedulableReason, err = sch.constraints.CheckJobConstraints(sch.schedulingContext, gctx); err != nil || !ok {
-			return
+			return ok, unschedulableReason, err
 		}
 	}

 	if gctx.RequestsFloatingResources {
 		if ok, unschedulableReason = sch.floatingResourceTypes.WithinLimits(sch.schedulingContext.Pool, sch.schedulingContext.Allocated); !ok {
-			return
+			return ok, unschedulableReason, err
 		}
 	}

 	return sch.trySchedule(ctx, gctx)
 }

-func (sch *GangScheduler) trySchedule(ctx *armadacontext.Context, gctx *context.GangSchedulingContext) (ok bool, unschedulableReason string, err error) {
+func (sch *GangScheduler) trySchedule(ctx *armadacontext.Context, gctx *context.GangSchedulingContext) (bool, string, error) {
+	var ok bool
+	var unschedulableReason string
+	var err error
 	nodeUniformity := gctx.NodeUniformityLabel()

 	// If no node uniformity or isn't a gang, try scheduling across all nodes.
@@ -163,12 +166,12 @@ func (sch *GangScheduler) trySchedule(ctx *armadacontext.Context, gctx *context.
 	if !ok {
 		ok = false
 		unschedulableReason = fmt.Sprintf("uniformity label %s is not indexed", nodeUniformity)
-		return
+		return ok, unschedulableReason, err
 	}
 	if len(nodeUniformityLabelValues) == 0 {
 		ok = false
 		unschedulableReason = fmt.Sprintf("no nodes with uniformity label %s", nodeUniformity)
-		return
+		return ok, unschedulableReason, err
 	}

 	// Try all possible values of nodeUniformityLabel one at a time to find the best fit.
@@ -185,7 +188,7 @@ func (sch *GangScheduler) trySchedule(ctx *armadacontext.Context, gctx *context.
 		ok, unschedulableReason, err = sch.tryScheduleGangWithTxn(ctx, txn, gctx)
 		if err != nil {
 			txn.Abort()
-			return
+			return ok, unschedulableReason, err
 		}
 		if ok {
 			currentFit := gctx.Fit()
@@ -210,23 +213,29 @@ func (sch *GangScheduler) trySchedule(ctx *armadacontext.Context, gctx *context.
 	if bestValue == "" {
 		ok = false
 		unschedulableReason = "at least one job in the gang does not fit on any node"
-		return
+		return ok, unschedulableReason, err
 	}
 	addNodeSelectorToGctx(gctx, gctx.NodeUniformityLabel(), bestValue)
 	return sch.tryScheduleGang(ctx, gctx)
 }

-func (sch *GangScheduler) tryScheduleGang(ctx *armadacontext.Context, gctx *context.GangSchedulingContext) (ok bool, unschedulableReason string, err error) {
+func (sch *GangScheduler) tryScheduleGang(ctx *armadacontext.Context, gctx *context.GangSchedulingContext) (bool, string, error) {
+	var ok bool
+	var unschedulableReason string
+	var err error
 	txn := sch.nodeDb.Txn(true)
 	defer txn.Abort()
 	ok, unschedulableReason, err = sch.tryScheduleGangWithTxn(ctx, txn, gctx)
 	if ok && err == nil {
 		txn.Commit()
 	}
-	return
+	return ok, unschedulableReason, err
 }

-func (sch *GangScheduler) tryScheduleGangWithTxn(_ *armadacontext.Context, txn *memdb.Txn, gctx *context.GangSchedulingContext) (ok bool, unschedulableReason string, err error) {
+func (sch *GangScheduler) tryScheduleGangWithTxn(_ *armadacontext.Context, txn *memdb.Txn, gctx *context.GangSchedulingContext) (bool, string, error) {
+	var ok bool
+	var unschedulableReason string
+	var err error
 	if ok, err = sch.nodeDb.ScheduleManyWithTxn(txn, gctx); err == nil {
 		if !ok {
 			if gctx.Cardinality() > 1 {
@@ -235,9 +244,9 @@ func (sch *GangScheduler) tryScheduleGangWithTxn(_ *armadacontext.Context, txn *
 				unschedulableReason = schedulerconstraints.JobDoesNotFitUnschedulableReason
 			}
 		}
-		return
+		return ok, unschedulableReason, err
 	}
-	return
+	return ok, unschedulableReason, err
 }

 func addNodeSelectorToGctx(gctx *context.GangSchedulingContext, nodeSelectorKey, nodeSelectorValue string) {

internal/scheduler/scheduling/gang_scheduler_test.go

Lines changed: 6 additions & 4 deletions
@@ -444,7 +444,8 @@ func TestGangScheduler(t *testing.T) {
 				{Key: "taint-a", Value: "true", Effect: v1.TaintEffectNoSchedule},
 				{Key: "taint-b", Value: "true", Effect: v1.TaintEffectNoSchedule},
 			}),
-			Gangs: func() (gangs [][]*jobdb.Job) {
+			Gangs: func() [][]*jobdb.Job {
+				var gangs [][]*jobdb.Job
 				var jobId ulid.ULID
 				jobId = util.ULID()
 				gangs = append(gangs, []*jobdb.Job{
@@ -453,7 +454,7 @@ func TestGangScheduler(t *testing.T) {
 				})
 				jobId = util.ULID()
 				gangs = append(gangs, []*jobdb.Job{testfixtures.TestJob("A", jobId, "armada-preemptible-away-both", testfixtures.Test1Cpu4GiPodReqs("A", jobId, 30000))})
-				return
+				return gangs
 			}(),
 			ExpectedScheduledIndices:        []int{1},
 			ExpectedCumulativeScheduledJobs: []int{0, 1},
@@ -493,12 +494,13 @@ func TestGangScheduler(t *testing.T) {
 					{Key: "taint-a", Value: "true", Effect: v1.TaintEffectNoSchedule},
 				},
 			),
-			Gangs: func() (gangs [][]*jobdb.Job) {
+			Gangs: func() [][]*jobdb.Job {
+				var gangs [][]*jobdb.Job
 				jobId := util.ULID()
 				gangs = append(gangs, []*jobdb.Job{testfixtures.TestJob("A", jobId, "armada-preemptible-away", testfixtures.Test32Cpu256GiWithLargeJobTolerationPodReqs("A", jobId, 30000))})
 				jobId = util.ULID()
 				gangs = append(gangs, []*jobdb.Job{testfixtures.TestJob("A", jobId, "armada-preemptible-away", testfixtures.Test32Cpu256GiWithLargeJobTolerationPodReqs("A", jobId, 30000))})
-				return
+				return gangs
 			}(),
 			ExpectedScheduledIndices:        []int{0},
 			ExpectedCumulativeScheduledJobs: []int{1, 1},
