Commit 62dcbab

sched_ext: Avoid live-locking bypass mode switching
A poorly behaving BPF scheduler can live-lock the system by e.g. incessantly banging on the same DSQ on a large NUMA system to the point where switching to the bypass mode can take a long time. Turning on the bypass mode requires dequeueing and re-enqueueing currently runnable tasks; if the DSQs that they are on are live-locked, this can take tens of seconds, cascading into other failures. This was observed on 2 x Intel Sapphire Rapids machines with 224 logical CPUs.

Inject artificial delays while the bypass mode is switching to guarantee timely completion.

While at it, move __scx_ops_bypass_lock into scx_ops_bypass() and rename it to bypass_lock.

Signed-off-by: Tejun Heo <[email protected]>
Reported-by: Valentin Andrei <[email protected]>
Reported-by: Patrick Lu <[email protected]>
1 parent f07b806 · commit 62dcbab
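For readers skimming the change, the core idea (hot paths briefly back off, under a hard time bound, whenever a bypass switch is in flight) can be illustrated outside the kernel. The sketch below is a hypothetical userspace analogue using C11 atomics; breather_depth, breather(), begin_switch(), end_switch() and the 1 ms bound are illustrative stand-ins mirroring the diff, not the kernel's scx_ops_* code itself.

/*
 * Hypothetical userspace analogue of the "breather" pattern introduced by
 * this commit: hot paths pause briefly while a switcher holds a depth
 * count, but never wait past a fixed deadline. Illustrative only.
 */
#include <stdatomic.h>
#include <stdint.h>
#include <time.h>

#define NSEC_PER_MSEC 1000000ULL

static atomic_int breather_depth;       /* > 0 while a mode switch is in flight */

static uint64_t now_ns(void)
{
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (uint64_t)ts.tv_sec * 1000000000ULL + ts.tv_nsec;
}

/* Hot-path hook: back off for at most ~1ms while a switch is pending. */
static void breather(void)
{
        uint64_t until;

        if (!atomic_load(&breather_depth))      /* common case: nothing pending */
                return;

        until = now_ns() + NSEC_PER_MSEC;
        do {
                int cnt = 1024;

                /* spin in short bursts so the deadline is rechecked regularly */
                while (atomic_load(&breather_depth) && --cnt)
                        ;
        } while (atomic_load(&breather_depth) && now_ns() < until);
}

/* The switcher brackets its dequeue/re-enqueue work with the depth count. */
static void begin_switch(void) { atomic_fetch_add(&breather_depth, 1); }
static void end_switch(void)   { atomic_fetch_sub(&breather_depth, 1); }

int main(void)
{
        begin_switch();         /* switch begins; hot paths start yielding */
        breather();             /* same thread still holds the count, so this spins ~1ms */
        end_switch();
        return 0;
}

The point of the shape, in the kernel patch as in this sketch, is that the wait is bounded on both axes: each burst is capped (1024 iterations) before the clock is rechecked, and the whole breather gives up after NSEC_PER_MSEC, so hot paths are slowed just enough for scx_ops_bypass() to make forward progress without ever being stalled indefinitely.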

File tree

1 file changed: +52 -3 lines


kernel/sched/ext.c

Lines changed: 52 additions & 3 deletions
@@ -867,8 +867,8 @@ static DEFINE_MUTEX(scx_ops_enable_mutex);
 DEFINE_STATIC_KEY_FALSE(__scx_ops_enabled);
 DEFINE_STATIC_PERCPU_RWSEM(scx_fork_rwsem);
 static atomic_t scx_ops_enable_state_var = ATOMIC_INIT(SCX_OPS_DISABLED);
+static atomic_t scx_ops_breather_depth = ATOMIC_INIT(0);
 static int scx_ops_bypass_depth;
-static DEFINE_RAW_SPINLOCK(__scx_ops_bypass_lock);
 static bool scx_ops_init_task_enabled;
 static bool scx_switching_all;
 DEFINE_STATIC_KEY_FALSE(__scx_switched_all);
@@ -2474,10 +2474,47 @@ static struct rq *move_task_between_dsqs(struct task_struct *p, u64 enq_flags,
 	return dst_rq;
 }
 
+/*
+ * A poorly behaving BPF scheduler can live-lock the system by e.g. incessantly
+ * banging on the same DSQ on a large NUMA system to the point where switching
+ * to the bypass mode can take a long time. Inject artifical delays while the
+ * bypass mode is switching to guarantee timely completion.
+ */
+static void scx_ops_breather(struct rq *rq)
+{
+	u64 until;
+
+	lockdep_assert_rq_held(rq);
+
+	if (likely(!atomic_read(&scx_ops_breather_depth)))
+		return;
+
+	raw_spin_rq_unlock(rq);
+
+	until = ktime_get_ns() + NSEC_PER_MSEC;
+
+	do {
+		int cnt = 1024;
+		while (atomic_read(&scx_ops_breather_depth) && --cnt)
+			cpu_relax();
+	} while (atomic_read(&scx_ops_breather_depth) &&
+		 time_before64(ktime_get_ns(), until));
+
+	raw_spin_rq_lock(rq);
+}
+
 static bool consume_dispatch_q(struct rq *rq, struct scx_dispatch_q *dsq)
 {
 	struct task_struct *p;
 retry:
+	/*
+	 * This retry loop can repeatedly race against scx_ops_bypass()
+	 * dequeueing tasks from @dsq trying to put the system into the bypass
+	 * mode. On some multi-socket machines (e.g. 2x Intel 8480c), this can
+	 * live-lock the machine into soft lockups. Give a breather.
+	 */
+	scx_ops_breather(rq);
+
 	/*
 	 * The caller can't expect to successfully consume a task if the task's
 	 * addition to @dsq isn't guaranteed to be visible somehow. Test
@@ -4609,10 +4646,11 @@ bool task_should_scx(struct task_struct *p)
  */
 static void scx_ops_bypass(bool bypass)
 {
+	static DEFINE_RAW_SPINLOCK(bypass_lock);
 	int cpu;
 	unsigned long flags;
 
-	raw_spin_lock_irqsave(&__scx_ops_bypass_lock, flags);
+	raw_spin_lock_irqsave(&bypass_lock, flags);
 	if (bypass) {
 		scx_ops_bypass_depth++;
 		WARN_ON_ONCE(scx_ops_bypass_depth <= 0);
@@ -4625,6 +4663,8 @@ static void scx_ops_bypass(bool bypass)
 		goto unlock;
 	}
 
+	atomic_inc(&scx_ops_breather_depth);
+
 	/*
 	 * No task property is changing. We just need to make sure all currently
 	 * queued tasks are re-queued according to the new scx_rq_bypassing()
@@ -4680,8 +4720,10 @@ static void scx_ops_bypass(bool bypass)
 		/* resched to restore ticks and idle state */
 		resched_cpu(cpu);
 	}
+
+	atomic_dec(&scx_ops_breather_depth);
 unlock:
-	raw_spin_unlock_irqrestore(&__scx_ops_bypass_lock, flags);
+	raw_spin_unlock_irqrestore(&bypass_lock, flags);
 }
 
 static void free_exit_info(struct scx_exit_info *ei)
@@ -6334,6 +6376,13 @@ static bool scx_dispatch_from_dsq(struct bpf_iter_scx_dsq_kern *kit,
 		raw_spin_rq_lock(src_rq);
 	}
 
+	/*
+	 * If the BPF scheduler keeps calling this function repeatedly, it can
+	 * cause similar live-lock conditions as consume_dispatch_q(). Insert a
+	 * breather if necessary.
+	 */
+	scx_ops_breather(src_rq);
+
 	locked_rq = src_rq;
 	raw_spin_lock(&src_dsq->lock);
 