Skip to content

Commit 3a181f2

Browse files
walac authored and Peter Zijlstra committed
sched/deadline: Consolidate Timer Cancellation
After commit b58652d ("sched/deadline: Fix task_struct reference leak"), I identified additional calls to hrtimer_try_to_cancel that might also require a dl_server check. It remains unclear whether this omission was intentional or accidental in those contexts. This patch consolidates the timer cancellation logic into dedicated functions, ensuring consistent behavior across all calls. Additionally, it reduces code duplication and improves overall code cleanliness. Note the use of the __always_inline keyword. In some instances, we have a task_struct pointer, dereference the dl member, and then use the container_of macro to retrieve the task_struct pointer again. By inlining the code, the compiler can potentially optimize out this redundant round trip. Signed-off-by: Wander Lairson Costa <[email protected]> Signed-off-by: Peter Zijlstra (Intel) <[email protected]> Acked-by: Juri Lelli <[email protected]> Link: https://lore.kernel.org/r/[email protected]
1 parent 53916d5 commit 3a181f2

File tree

1 file changed

+27
-14
lines changed

1 file changed

+27
-14
lines changed

kernel/sched/deadline.c

Lines changed: 27 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -342,6 +342,29 @@ static void dl_rq_change_utilization(struct rq *rq, struct sched_dl_entity *dl_s
342342
__add_rq_bw(new_bw, &rq->dl);
343343
}
344344

345+
static __always_inline
346+
void cancel_dl_timer(struct sched_dl_entity *dl_se, struct hrtimer *timer)
347+
{
348+
/*
349+
* If the timer callback was running (hrtimer_try_to_cancel == -1),
350+
* it will eventually call put_task_struct().
351+
*/
352+
if (hrtimer_try_to_cancel(timer) == 1 && !dl_server(dl_se))
353+
put_task_struct(dl_task_of(dl_se));
354+
}
355+
356+
static __always_inline
357+
void cancel_replenish_timer(struct sched_dl_entity *dl_se)
358+
{
359+
cancel_dl_timer(dl_se, &dl_se->dl_timer);
360+
}
361+
362+
static __always_inline
363+
void cancel_inactive_timer(struct sched_dl_entity *dl_se)
364+
{
365+
cancel_dl_timer(dl_se, &dl_se->inactive_timer);
366+
}
367+
345368
static void dl_change_utilization(struct task_struct *p, u64 new_bw)
346369
{
347370
WARN_ON_ONCE(p->dl.flags & SCHED_FLAG_SUGOV);
@@ -495,10 +518,7 @@ static void task_contending(struct sched_dl_entity *dl_se, int flags)
495518
* will not touch the rq's active utilization,
496519
* so we are still safe.
497520
*/
498-
if (hrtimer_try_to_cancel(&dl_se->inactive_timer) == 1) {
499-
if (!dl_server(dl_se))
500-
put_task_struct(dl_task_of(dl_se));
501-
}
521+
cancel_inactive_timer(dl_se);
502522
} else {
503523
/*
504524
* Since "dl_non_contending" is not set, the
@@ -2113,13 +2133,8 @@ static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
21132133
* The replenish timer needs to be canceled. No
21142134
* problem if it fires concurrently: boosted threads
21152135
* are ignored in dl_task_timer().
2116-
*
2117-
* If the timer callback was running (hrtimer_try_to_cancel == -1),
2118-
* it will eventually call put_task_struct().
21192136
*/
2120-
if (hrtimer_try_to_cancel(&p->dl.dl_timer) == 1 &&
2121-
!dl_server(&p->dl))
2122-
put_task_struct(p);
2137+
cancel_replenish_timer(&p->dl);
21232138
p->dl.dl_throttled = 0;
21242139
}
21252140
} else if (!dl_prio(p->normal_prio)) {
@@ -2287,8 +2302,7 @@ static void migrate_task_rq_dl(struct task_struct *p, int new_cpu __maybe_unused
22872302
* will not touch the rq's active utilization,
22882303
* so we are still safe.
22892304
*/
2290-
if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1)
2291-
put_task_struct(p);
2305+
cancel_inactive_timer(&p->dl);
22922306
}
22932307
sub_rq_bw(&p->dl, &rq->dl);
22942308
rq_unlock(rq, &rf);
@@ -3036,8 +3050,7 @@ static void switched_from_dl(struct rq *rq, struct task_struct *p)
30363050
*/
30373051
static void switched_to_dl(struct rq *rq, struct task_struct *p)
30383052
{
3039-
if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1)
3040-
put_task_struct(p);
3053+
cancel_inactive_timer(&p->dl);
30413054

30423055
/*
30433056
* In case a task is setscheduled to SCHED_DEADLINE we need to keep

0 commit comments

Comments
 (0)