
Commit adcc8da

Davidlohr Bueso authored and Ingo Molnar committed
sched/core: Simplify helpers for rq clock update skip requests
By renaming the functions we can get rid of the skip parameter and have better code readability. It makes zero sense to have things such as:

	rq_clock_skip_update(rq, false)

when the skip request is in fact not going to happen. Ever. Rename things such that we end up with:

	rq_clock_skip_update(rq)
	rq_clock_cancel_skipupdate(rq)

Signed-off-by: Davidlohr Bueso <[email protected]>
Acked-by: Peter Zijlstra (Intel) <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Mike Galbraith <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: [email protected]
Cc: [email protected]
Cc: [email protected]
Link: http://lkml.kernel.org/r/20180404161539.nhadkff2aats74jh@linux-n805
Signed-off-by: Ingo Molnar <[email protected]>
1 parent d29a206 commit adcc8da

5 files changed, 16 insertions(+), 9 deletions(-)

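At the API level, the change splits the boolean-flag helper into two single-purpose helpers (full definitions in the kernel/sched/sched.h hunk below):

	/* Before: one helper, behavior selected by a bool argument. */
	static inline void rq_clock_skip_update(struct rq *rq, bool skip);

	/* After: two helpers, each naming its intent. */
	static inline void rq_clock_skip_update(struct rq *rq);	/* request a skip */
	static inline void rq_clock_cancel_skipupdate(struct rq *rq);	/* cancel a pending request */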

kernel/sched/core.c

Lines changed: 1 addition & 1 deletion
@@ -874,7 +874,7 @@ void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
 	 * this case, we can save a useless back to back clock update.
 	 */
 	if (task_on_rq_queued(rq->curr) && test_tsk_need_resched(rq->curr))
-		rq_clock_skip_update(rq, true);
+		rq_clock_skip_update(rq);
 }
 
 #ifdef CONFIG_SMP

kernel/sched/deadline.c

Lines changed: 1 addition & 1 deletion
@@ -1560,7 +1560,7 @@ static void yield_task_dl(struct rq *rq)
 	 * so we don't do microscopic update in schedule()
 	 * and double the fastpath cost.
 	 */
-	rq_clock_skip_update(rq, true);
+	rq_clock_skip_update(rq);
 }
 
 #ifdef CONFIG_SMP

kernel/sched/fair.c

Lines changed: 1 addition & 1 deletion
@@ -7089,7 +7089,7 @@ static void yield_task_fair(struct rq *rq)
 		 * so we don't do microscopic update in schedule()
 		 * and double the fastpath cost.
 		 */
-		rq_clock_skip_update(rq, true);
+		rq_clock_skip_update(rq);
 	}
 
 	set_skip_buddy(se);

kernel/sched/rt.c

Lines changed: 1 addition & 1 deletion
@@ -861,7 +861,7 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
 			 * 'runtime'.
 			 */
 			if (rt_rq->rt_nr_running && rq->curr == rq->idle)
-				rq_clock_skip_update(rq, false);
+				rq_clock_cancel_skipupdate(rq);
 		}
 		if (rt_rq->rt_time || rt_rq->rt_nr_running)
 			idle = 0;

kernel/sched/sched.h

Lines changed: 12 additions & 5 deletions
@@ -976,13 +976,20 @@ static inline u64 rq_clock_task(struct rq *rq)
 	return rq->clock_task;
 }
 
-static inline void rq_clock_skip_update(struct rq *rq, bool skip)
+static inline void rq_clock_skip_update(struct rq *rq)
 {
 	lockdep_assert_held(&rq->lock);
-	if (skip)
-		rq->clock_update_flags |= RQCF_REQ_SKIP;
-	else
-		rq->clock_update_flags &= ~RQCF_REQ_SKIP;
+	rq->clock_update_flags |= RQCF_REQ_SKIP;
+}
+
+/*
+ * See rt task throttoling, which is the only time a skip
+ * request is cancelled.
+ */
+static inline void rq_clock_cancel_skipupdate(struct rq *rq)
+{
+	lockdep_assert_held(&rq->lock);
+	rq->clock_update_flags &= ~RQCF_REQ_SKIP;
 }
 
 struct rq_flags {
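For readers unfamiliar with the RQCF_* machinery, here is a minimal standalone C model of the skip-request flow. The flag values, the REQ-to-ACT promotion done by __schedule(), and the early return in update_rq_clock() are not part of this diff; they are paraphrased assumptions about the surrounding scheduler code, compressed into a userspace sketch:

#include <stdio.h>

/* Flag names mirror kernel/sched/sched.h (values assumed for this sketch). */
#define RQCF_REQ_SKIP	0x01	/* a clock-update skip has been requested */
#define RQCF_ACT_SKIP	0x02	/* the skip is active for the current update */

struct rq {
	unsigned int clock_update_flags;
	unsigned long long clock;
};

/* The two helpers as introduced by this commit (minus lockdep). */
static inline void rq_clock_skip_update(struct rq *rq)
{
	rq->clock_update_flags |= RQCF_REQ_SKIP;
}

static inline void rq_clock_cancel_skipupdate(struct rq *rq)
{
	rq->clock_update_flags &= ~RQCF_REQ_SKIP;
}

/* Stand-in for the real update_rq_clock(): honor an active skip. */
static void update_rq_clock(struct rq *rq)
{
	if (rq->clock_update_flags & RQCF_ACT_SKIP)
		return;			/* back-to-back update avoided */
	rq->clock++;			/* stand-in for reading the hardware clock */
}

int main(void)
{
	struct rq rq = { 0, 0 };

	rq_clock_skip_update(&rq);	/* caller: the next update is redundant */
	rq.clock_update_flags <<= 1;	/* __schedule(): promote REQ_SKIP to ACT_SKIP */
	update_rq_clock(&rq);		/* skipped: clock stays at 0 */
	printf("clock = %llu\n", rq.clock);
	return 0;
}

A call to rq_clock_cancel_skipupdate() before the promotion would clear the pending request, which is what the kernel/sched/rt.c hunk above does when unthrottling onto an idle CPU.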
