
Commit 71b8ebb

Merge branch 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull scheduler fixes from Thomas Gleixner:
 "A few scheduler fixes:

   - Prevent a bogus warning vs. runqueue clock update flags in
     do_sched_rt_period_timer()

   - Simplify the helper functions which handle requests for skipping
     the runqueue clock update

   - Do not unlock the tunables mutex in the error path of the cpu
     frequency scheduler utils. It's not held.

   - Enforce proper alignment for 'struct util_est' in sched_avg to
     prevent a misalignment fault on IA64"

* 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  sched/core: Force proper alignment of 'struct util_est'
  sched/core: Simplify helpers for rq clock update skip requests
  sched/rt: Fix rq->clock_update_flags < RQCF_ACT_SKIP warning
  sched/cpufreq/schedutil: Fix error path mutex unlock

2 parents 174e719 + 317d359 commit 71b8ebb

File tree: 7 files changed (+22, -14 lines)

include/linux/sched.h: 3 additions & 3 deletions

@@ -300,7 +300,7 @@ struct util_est {
 	unsigned int			enqueued;
 	unsigned int			ewma;
 #define UTIL_EST_WEIGHT_SHIFT		2
-};
+} __attribute__((__aligned__(sizeof(u64))));
 
 /*
  * The load_avg/util_avg accumulates an infinite geometric series
@@ -364,7 +364,7 @@ struct sched_avg {
 	unsigned long			runnable_load_avg;
 	unsigned long			util_avg;
 	struct util_est			util_est;
-};
+} ____cacheline_aligned;
 
 struct sched_statistics {
 #ifdef CONFIG_SCHEDSTATS
@@ -435,7 +435,7 @@ struct sched_entity {
 	 * Put into separate cache line so it does not
	 * collide with read-mostly values above.
	 */
-	struct sched_avg		avg ____cacheline_aligned_in_smp;
+	struct sched_avg		avg;
 #endif
 };
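For context on the 'struct util_est' change: the pull summary says the forced alignment prevents a misalignment fault on IA64, which arises when the enqueued/ewma pair is accessed as a single 64-bit value. Below is a minimal userspace sketch of that idea, assuming such whole-struct accesses; the names util_est_example and util_est_read_once are hypothetical and this is not the kernel's code.

/*
 * Illustrative sketch only: when the two 32-bit fields are copied as one
 * 64-bit quantity, the struct must start on an 8-byte boundary or
 * strict-alignment machines such as IA64 fault on the access.
 */
#include <stdint.h>

struct util_est_example {
	uint32_t enqueued;
	uint32_t ewma;
} __attribute__((__aligned__(sizeof(uint64_t))));

/* Volatile 8-byte load in the style of a READ_ONCE() of the whole struct;
 * the forced alignment guarantees the access is naturally aligned. */
static inline struct util_est_example
util_est_read_once(const struct util_est_example *p)
{
	return *(const volatile struct util_est_example *)p;
}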

kernel/sched/core.c: 1 addition & 1 deletion

@@ -874,7 +874,7 @@ void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
 	 * this case, we can save a useless back to back clock update.
 	 */
 	if (task_on_rq_queued(rq->curr) && test_tsk_need_resched(rq->curr))
-		rq_clock_skip_update(rq, true);
+		rq_clock_skip_update(rq);
 }
 
 #ifdef CONFIG_SMP

kernel/sched/cpufreq_schedutil.c: 1 addition & 2 deletions

@@ -631,10 +631,9 @@ static int sugov_init(struct cpufreq_policy *policy)
 
 stop_kthread:
 	sugov_kthread_stop(sg_policy);
-
-free_sg_policy:
 	mutex_unlock(&global_tunables_lock);
 
+free_sg_policy:
 	sugov_policy_free(sg_policy);
 
 disable_fast_switch:
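The hunk above moves the mutex_unlock() so that the free_sg_policy label no longer drops global_tunables_lock, since that label is also reached from paths where the mutex was never taken. A generic userspace sketch of that error-unwind rule follows, assuming pthreads; init_example, register_tunables and tunables_lock are hypothetical names, not the schedutil code.

/*
 * Sketch: a cleanup label may only unlock a mutex if every goto that
 * reaches it runs with that mutex held.
 */
#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t tunables_lock = PTHREAD_MUTEX_INITIALIZER;

static int register_tunables(void *cfg)
{
	return cfg ? 0 : -1;
}

int init_example(void *cfg)
{
	void *sg = malloc(64);	/* kept on success, mirroring sg_policy */
	int ret = -1;

	if (!sg)
		return -1;

	if (!cfg)
		goto free_sg;		/* lock not taken yet: must not unlock */

	pthread_mutex_lock(&tunables_lock);
	ret = register_tunables(cfg);
	if (ret)
		goto unlock;		/* lock held: unlock before freeing */

	pthread_mutex_unlock(&tunables_lock);
	return 0;

unlock:
	pthread_mutex_unlock(&tunables_lock);
free_sg:
	free(sg);
	return ret;
}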

kernel/sched/deadline.c: 1 addition & 1 deletion

@@ -1560,7 +1560,7 @@ static void yield_task_dl(struct rq *rq)
 	 * so we don't do microscopic update in schedule()
 	 * and double the fastpath cost.
 	 */
-	rq_clock_skip_update(rq, true);
+	rq_clock_skip_update(rq);
 }
 
 #ifdef CONFIG_SMP

kernel/sched/fair.c: 1 addition & 1 deletion

@@ -7089,7 +7089,7 @@ static void yield_task_fair(struct rq *rq)
 		 * so we don't do microscopic update in schedule()
 		 * and double the fastpath cost.
 		 */
-		rq_clock_skip_update(rq, true);
+		rq_clock_skip_update(rq);
 	}
 
 	set_skip_buddy(se);

kernel/sched/rt.c: 3 additions & 1 deletion

@@ -839,6 +839,8 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
 			continue;
 
 		raw_spin_lock(&rq->lock);
+		update_rq_clock(rq);
+
 		if (rt_rq->rt_time) {
 			u64 runtime;
 
@@ -859,7 +861,7 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
 			 * 'runtime'.
 			 */
 			if (rt_rq->rt_nr_running && rq->curr == rq->idle)
-				rq_clock_skip_update(rq, false);
+				rq_clock_cancel_skipupdate(rq);
 		}
 		if (rt_rq->rt_time || rt_rq->rt_nr_running)
 			idle = 0;

kernel/sched/sched.h: 12 additions & 5 deletions

@@ -976,13 +976,20 @@ static inline u64 rq_clock_task(struct rq *rq)
 	return rq->clock_task;
 }
 
-static inline void rq_clock_skip_update(struct rq *rq, bool skip)
+static inline void rq_clock_skip_update(struct rq *rq)
 {
 	lockdep_assert_held(&rq->lock);
-	if (skip)
-		rq->clock_update_flags |= RQCF_REQ_SKIP;
-	else
-		rq->clock_update_flags &= ~RQCF_REQ_SKIP;
+	rq->clock_update_flags |= RQCF_REQ_SKIP;
+}
+
+/*
+ * See rt task throttling, which is the only time a skip
+ * request is cancelled.
+ */
+static inline void rq_clock_cancel_skipupdate(struct rq *rq)
+{
+	lockdep_assert_held(&rq->lock);
+	rq->clock_update_flags &= ~RQCF_REQ_SKIP;
 }
 
 struct rq_flags {
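As a usage sketch of the split helpers, the kernel/sched/rt.c hunk above is the one caller that both updates the clock under rq->lock and cancels a pending skip request; condensed below for illustration only (not a drop-in excerpt, surrounding throttling logic elided).

	raw_spin_lock(&rq->lock);
	update_rq_clock(rq);	/* fixes the clock_update_flags < RQCF_ACT_SKIP warning */

	if (rt_rq->rt_nr_running && rq->curr == rq->idle)
		rq_clock_cancel_skipupdate(rq);	/* withdraw a previously requested skip */

	raw_spin_unlock(&rq->lock);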
