Commit f7d2728

Author: Ingo Molnar (committed)
sched/debug: Change SCHED_WARN_ON() to WARN_ON_ONCE()
The scheduler has this special SCHED_WARN_ON() facility that depends on
CONFIG_SCHED_DEBUG. Since CONFIG_SCHED_DEBUG is getting removed, convert
SCHED_WARN_ON() to WARN_ON_ONCE().

Note that the warning output isn't 100% equivalent:

    #define SCHED_WARN_ON(x) WARN_ONCE(x, #x)

SCHED_WARN_ON() would output the 'x' condition as well, while
WARN_ON_ONCE() will only show a backtrace. Hopefully these warnings are
rare enough to not really matter. If they do, we should probably
introduce a new WARN_ON() variant that outputs the condition in
stringified form, or improve WARN_ON() itself.

Signed-off-by: Ingo Molnar <[email protected]>
Tested-by: Shrikanth Hegde <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Juri Lelli <[email protected]>
Cc: Vincent Guittot <[email protected]>
Cc: Dietmar Eggemann <[email protected]>
Cc: Steven Rostedt <[email protected]>
Cc: Ben Segall <[email protected]>
Cc: Mel Gorman <[email protected]>
Cc: Valentin Schneider <[email protected]>
Cc: Linus Torvalds <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
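For reference, the behavioral difference comes down to the macro definitions. A minimal sketch follows: the old definition is the one quoted in the message above, the WARN_ON_ONCE() call shows the post-conversion form, and the WARN_ON_ONCE_MSG() variant at the end is hypothetical, illustrating the stringifying helper the message suggests rather than anything this commit adds:

    /* Old definition (CONFIG_SCHED_DEBUG only): the stringified condition
     * is passed as the printk message, so the warning names the check. */
    #define SCHED_WARN_ON(x)    WARN_ONCE(x, #x)

    /* After this commit, call sites use the generic helper, which warns
     * once per boot and prints only a backtrace: */
    WARN_ON_ONCE(rq->clock_update_flags & RQCF_UPDATED);

    /* Hypothetical stringifying variant along the lines suggested above
     * (illustrative only, not part of this commit): */
    #define WARN_ON_ONCE_MSG(x) WARN_ONCE(x, "%s\n", #x)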
Parent: d047e32 · Commit: f7d2728

File tree: 8 files changed (+56 lines, −62 lines)


kernel/sched/core.c

Lines changed: 12 additions & 12 deletions
@@ -801,7 +801,7 @@ void update_rq_clock(struct rq *rq)
 
 #ifdef CONFIG_SCHED_DEBUG
         if (sched_feat(WARN_DOUBLE_CLOCK))
-                SCHED_WARN_ON(rq->clock_update_flags & RQCF_UPDATED);
+                WARN_ON_ONCE(rq->clock_update_flags & RQCF_UPDATED);
         rq->clock_update_flags |= RQCF_UPDATED;
 #endif
         clock = sched_clock_cpu(cpu_of(rq));
@@ -1719,7 +1719,7 @@ static inline void uclamp_rq_dec_id(struct rq *rq, struct task_struct *p,
 
         bucket = &uc_rq->bucket[uc_se->bucket_id];
 
-        SCHED_WARN_ON(!bucket->tasks);
+        WARN_ON_ONCE(!bucket->tasks);
         if (likely(bucket->tasks))
                 bucket->tasks--;
 
@@ -1739,7 +1739,7 @@ static inline void uclamp_rq_dec_id(struct rq *rq, struct task_struct *p,
          * Defensive programming: this should never happen. If it happens,
          * e.g. due to future modification, warn and fix up the expected value.
          */
-        SCHED_WARN_ON(bucket->value > rq_clamp);
+        WARN_ON_ONCE(bucket->value > rq_clamp);
         if (bucket->value >= rq_clamp) {
                 bkt_clamp = uclamp_rq_max_value(rq, clamp_id, uc_se->value);
                 uclamp_rq_set(rq, clamp_id, bkt_clamp);
@@ -2121,7 +2121,7 @@ void activate_task(struct rq *rq, struct task_struct *p, int flags)
 
 void deactivate_task(struct rq *rq, struct task_struct *p, int flags)
 {
-        SCHED_WARN_ON(flags & DEQUEUE_SLEEP);
+        WARN_ON_ONCE(flags & DEQUEUE_SLEEP);
 
         WRITE_ONCE(p->on_rq, TASK_ON_RQ_MIGRATING);
         ASSERT_EXCLUSIVE_WRITER(p->on_rq);
@@ -2726,7 +2726,7 @@ __do_set_cpus_allowed(struct task_struct *p, struct affinity_context *ctx)
          * XXX do further audits, this smells like something putrid.
          */
         if (ctx->flags & SCA_MIGRATE_DISABLE)
-                SCHED_WARN_ON(!p->on_cpu);
+                WARN_ON_ONCE(!p->on_cpu);
         else
                 lockdep_assert_held(&p->pi_lock);
 
@@ -4195,7 +4195,7 @@ int try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
          * - we're serialized against set_special_state() by virtue of
          *   it disabling IRQs (this allows not taking ->pi_lock).
          */
-        SCHED_WARN_ON(p->se.sched_delayed);
+        WARN_ON_ONCE(p->se.sched_delayed);
         if (!ttwu_state_match(p, state, &success))
                 goto out;
 
@@ -4489,7 +4489,7 @@ static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
         INIT_LIST_HEAD(&p->se.group_node);
 
         /* A delayed task cannot be in clone(). */
-        SCHED_WARN_ON(p->se.sched_delayed);
+        WARN_ON_ONCE(p->se.sched_delayed);
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
         p->se.cfs_rq = NULL;
@@ -5745,7 +5745,7 @@ static void sched_tick_remote(struct work_struct *work)
                  * we are always sure that there is no proxy (only a
                  * single task is running).
                  */
-                SCHED_WARN_ON(rq->curr != rq->donor);
+                WARN_ON_ONCE(rq->curr != rq->donor);
                 update_rq_clock(rq);
 
                 if (!is_idle_task(curr)) {
@@ -5965,7 +5965,7 @@ static inline void schedule_debug(struct task_struct *prev, bool preempt)
                 preempt_count_set(PREEMPT_DISABLED);
         }
         rcu_sleep_check();
-        SCHED_WARN_ON(ct_state() == CT_STATE_USER);
+        WARN_ON_ONCE(ct_state() == CT_STATE_USER);
 
         profile_hit(SCHED_PROFILING, __builtin_return_address(0));
 
@@ -6811,7 +6811,7 @@ static inline void sched_submit_work(struct task_struct *tsk)
          * deadlock if the callback attempts to acquire a lock which is
          * already acquired.
          */
-        SCHED_WARN_ON(current->__state & TASK_RTLOCK_WAIT);
+        WARN_ON_ONCE(current->__state & TASK_RTLOCK_WAIT);
 
         /*
          * If we are going to sleep and we have plugged IO queued,
@@ -9249,7 +9249,7 @@ static void cpu_util_update_eff(struct cgroup_subsys_state *css)
         unsigned int clamps;
 
         lockdep_assert_held(&uclamp_mutex);
-        SCHED_WARN_ON(!rcu_read_lock_held());
+        WARN_ON_ONCE(!rcu_read_lock_held());
 
         css_for_each_descendant_pre(css, top_css) {
                 uc_parent = css_tg(css)->parent
@@ -10584,7 +10584,7 @@ static void task_mm_cid_work(struct callback_head *work)
         struct mm_struct *mm;
         int weight, cpu;
 
-        SCHED_WARN_ON(t != container_of(work, struct task_struct, cid_work));
+        WARN_ON_ONCE(t != container_of(work, struct task_struct, cid_work));
 
         work->next = work; /* Prevent double-add */
         if (t->flags & PF_EXITING)
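One semantic detail that carries over in these conversions (generic WARN_ON_ONCE() behavior, not specific to this commit): the macro evaluates its condition, warns only the first time the condition is true, and returns the condition's truth value, so it can gate an error path directly. A minimal sketch, where validate() is a made-up helper:

    /* Warn once and bail out on the same test; validate() is hypothetical. */
    if (WARN_ON_ONCE(!validate(p)))
            return;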

kernel/sched/core_sched.c

Lines changed: 1 addition & 1 deletion
@@ -65,7 +65,7 @@ static unsigned long sched_core_update_cookie(struct task_struct *p,
          * a cookie until after we've removed it, we must have core scheduling
          * enabled here.
          */
-        SCHED_WARN_ON((p->core_cookie || cookie) && !sched_core_enabled(rq));
+        WARN_ON_ONCE((p->core_cookie || cookie) && !sched_core_enabled(rq));
 
         if (sched_core_enqueued(p))
                 sched_core_dequeue(rq, p, DEQUEUE_SAVE);

kernel/sched/deadline.c

Lines changed: 6 additions & 6 deletions
@@ -249,8 +249,8 @@ void __add_running_bw(u64 dl_bw, struct dl_rq *dl_rq)
 
         lockdep_assert_rq_held(rq_of_dl_rq(dl_rq));
         dl_rq->running_bw += dl_bw;
-        SCHED_WARN_ON(dl_rq->running_bw < old); /* overflow */
-        SCHED_WARN_ON(dl_rq->running_bw > dl_rq->this_bw);
+        WARN_ON_ONCE(dl_rq->running_bw < old); /* overflow */
+        WARN_ON_ONCE(dl_rq->running_bw > dl_rq->this_bw);
         /* kick cpufreq (see the comment in kernel/sched/sched.h). */
         cpufreq_update_util(rq_of_dl_rq(dl_rq), 0);
 }
@@ -262,7 +262,7 @@ void __sub_running_bw(u64 dl_bw, struct dl_rq *dl_rq)
 
         lockdep_assert_rq_held(rq_of_dl_rq(dl_rq));
         dl_rq->running_bw -= dl_bw;
-        SCHED_WARN_ON(dl_rq->running_bw > old); /* underflow */
+        WARN_ON_ONCE(dl_rq->running_bw > old); /* underflow */
         if (dl_rq->running_bw > old)
                 dl_rq->running_bw = 0;
         /* kick cpufreq (see the comment in kernel/sched/sched.h). */
@@ -276,7 +276,7 @@ void __add_rq_bw(u64 dl_bw, struct dl_rq *dl_rq)
 
         lockdep_assert_rq_held(rq_of_dl_rq(dl_rq));
         dl_rq->this_bw += dl_bw;
-        SCHED_WARN_ON(dl_rq->this_bw < old); /* overflow */
+        WARN_ON_ONCE(dl_rq->this_bw < old); /* overflow */
 }
 
 static inline
@@ -286,10 +286,10 @@ void __sub_rq_bw(u64 dl_bw, struct dl_rq *dl_rq)
 
         lockdep_assert_rq_held(rq_of_dl_rq(dl_rq));
         dl_rq->this_bw -= dl_bw;
-        SCHED_WARN_ON(dl_rq->this_bw > old); /* underflow */
+        WARN_ON_ONCE(dl_rq->this_bw > old); /* underflow */
         if (dl_rq->this_bw > old)
                 dl_rq->this_bw = 0;
-        SCHED_WARN_ON(dl_rq->running_bw > dl_rq->this_bw);
+        WARN_ON_ONCE(dl_rq->running_bw > dl_rq->this_bw);
 }
 
 static inline
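The overflow/underflow checks above follow the standard unsigned-wraparound idiom: save the old value, apply the update, then compare. For u64 arithmetic, an addition result smaller than the saved value (or a subtraction result larger than it) can only mean the counter wrapped. A standalone sketch of the pattern, using a hypothetical counter:

    u64 old = counter;

    counter += delta;
    WARN_ON_ONCE(counter < old);    /* sum wrapped past U64_MAX: overflow */

    old = counter;
    counter -= delta;
    WARN_ON_ONCE(counter > old);    /* difference wrapped below 0: underflow */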

kernel/sched/ext.c

Lines changed: 1 addition & 1 deletion
@@ -2341,7 +2341,7 @@ static bool task_can_run_on_remote_rq(struct task_struct *p, struct rq *rq,
 {
         int cpu = cpu_of(rq);
 
-        SCHED_WARN_ON(task_cpu(p) == cpu);
+        WARN_ON_ONCE(task_cpu(p) == cpu);
 
         /*
          * If @p has migration disabled, @p->cpus_ptr is updated to contain only
