
Commit 26a8b12

changhuaixin authored and Ingo Molnar committed
sched/fair: Fix race between runtime distribution and assignment
Currently, there is a potential race between distribute_cfs_runtime() and assign_cfs_rq_runtime(). The race happens when cfs_b->runtime is read and distributed without holding the lock, and there turns out to be too little runtime left to charge against after distribution, because assign_cfs_rq_runtime() may be called during distribution and consume cfs_b->runtime at the same time.

Fibtest is the tool used to exercise this race. Assume all gcfs_rq are throttled and the cfs period timer runs: slow threads may run and sleep, returning unused cfs_rq runtime while keeping min_cfs_rq_runtime in their local pool. If all of this happens quickly enough, cfs_b->runtime drops sharply. If the runtime distributed is large as well, runtime over-use occurs.

Runtime over-use of about 70 percent of quota is seen when running fibtest on a 96-core machine: with 1 fast thread and 95 slow threads in the test group and a 10ms quota configured for the group, the CPU usage of fibtest is 17.0%, far more than the expected 10% (the extra 7 percentage points are about 70% of the 10% quota). On a smaller machine with 32 cores, running fibtest with 96 threads gives more than 12% CPU usage, also above the expected 10%. This shows that on similar workloads the race does affect CPU bandwidth control.

Solve this by holding the lock inside distribute_cfs_runtime().

Fixes: c06f04c ("sched: Fix potential near-infinite distribute_cfs_runtime() loop")
Reviewed-by: Ben Segall <[email protected]>
Signed-off-by: Huaixin Chang <[email protected]>
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Signed-off-by: Ingo Molnar <[email protected]>
Link: https://lore.kernel.org/lkml/[email protected]/
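To make the race concrete, below is a minimal user-space C sketch of the pre-fix bookkeeping (illustration only, not kernel code; pool, handed_out, assign_runtime() and the numbers are hypothetical stand-ins for cfs_b->runtime, the charged runtime, and assign_cfs_rq_runtime()). The period-timer path snapshots the pool and distributes from that stale snapshot outside the lock, while the assignment path drains the pool concurrently, so more runtime can be handed out than the pool ever held:

/* Hypothetical user-space model of the pre-fix accounting; not kernel code. */
#include <stdio.h>

static long pool = 10;        /* stands in for cfs_b->runtime (arbitrary units) */
static long handed_out = 0;   /* total runtime charged to groups */

/* Period-timer path (old code): distribute from a stale snapshot of the pool. */
static void distribute_from_snapshot(long snapshot)
{
	long grant = snapshot;                  /* throttled groups soak up the whole snapshot */

	handed_out += grant;
	pool = pool > grant ? pool - grant : 0; /* models the caller's lsub_positive() clamp */
}

/* Assignment path: a running group pulls runtime straight from the pool. */
static void assign_runtime(long want)
{
	long grant = want < pool ? want : pool;

	pool -= grant;
	handed_out += grant;
}

int main(void)
{
	long snapshot = pool;               /* timer reads cfs_b->runtime ... */

	assign_runtime(8);                  /* ... assignment races in and drains most of it ... */
	distribute_from_snapshot(snapshot); /* ... but distribution still spends the old value */

	/* Prints 18: 80% more than the 10 units the pool ever held. */
	printf("pool started at 10, handed out %ld\n", handed_out);
	return 0;
}

With the fix, the charge is made against cfs_b->runtime itself while holding cfs_b->lock, so the two paths can no longer both spend the same runtime.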
1 parent d76343c


kernel/sched/fair.c

Lines changed: 11 additions & 20 deletions
@@ -4836,11 +4836,10 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
 		resched_curr(rq);
 }
 
-static u64 distribute_cfs_runtime(struct cfs_bandwidth *cfs_b, u64 remaining)
+static void distribute_cfs_runtime(struct cfs_bandwidth *cfs_b)
 {
 	struct cfs_rq *cfs_rq;
-	u64 runtime;
-	u64 starting_runtime = remaining;
+	u64 runtime, remaining = 1;
 
 	rcu_read_lock();
 	list_for_each_entry_rcu(cfs_rq, &cfs_b->throttled_cfs_rq,
@@ -4855,10 +4854,13 @@ static u64 distribute_cfs_runtime(struct cfs_bandwidth *cfs_b, u64 remaining)
 		/* By the above check, this should never be true */
 		SCHED_WARN_ON(cfs_rq->runtime_remaining > 0);
 
+		raw_spin_lock(&cfs_b->lock);
 		runtime = -cfs_rq->runtime_remaining + 1;
-		if (runtime > remaining)
-			runtime = remaining;
-		remaining -= runtime;
+		if (runtime > cfs_b->runtime)
+			runtime = cfs_b->runtime;
+		cfs_b->runtime -= runtime;
+		remaining = cfs_b->runtime;
+		raw_spin_unlock(&cfs_b->lock);
 
 		cfs_rq->runtime_remaining += runtime;
 
@@ -4873,8 +4875,6 @@ static u64 distribute_cfs_runtime(struct cfs_bandwidth *cfs_b, u64 remaining)
 			break;
 	}
 	rcu_read_unlock();
-
-	return starting_runtime - remaining;
 }
 
 /*
@@ -4885,7 +4885,6 @@ static u64 distribute_cfs_runtime(struct cfs_bandwidth *cfs_b, u64 remaining)
  */
 static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun, unsigned long flags)
 {
-	u64 runtime;
 	int throttled;
 
 	/* no need to continue the timer with no bandwidth constraint */
@@ -4914,24 +4913,17 @@ static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun, u
 	cfs_b->nr_throttled += overrun;
 
 	/*
-	 * This check is repeated as we are holding onto the new bandwidth while
-	 * we unthrottle. This can potentially race with an unthrottled group
-	 * trying to acquire new bandwidth from the global pool. This can result
-	 * in us over-using our runtime if it is all used during this loop, but
-	 * only by limited amounts in that extreme case.
+	 * This check is repeated as we release cfs_b->lock while we unthrottle.
 	 */
 	while (throttled && cfs_b->runtime > 0 && !cfs_b->distribute_running) {
-		runtime = cfs_b->runtime;
 		cfs_b->distribute_running = 1;
 		raw_spin_unlock_irqrestore(&cfs_b->lock, flags);
 		/* we can't nest cfs_b->lock while distributing bandwidth */
-		runtime = distribute_cfs_runtime(cfs_b, runtime);
+		distribute_cfs_runtime(cfs_b);
 		raw_spin_lock_irqsave(&cfs_b->lock, flags);
 
 		cfs_b->distribute_running = 0;
 		throttled = !list_empty(&cfs_b->throttled_cfs_rq);
-
-		lsub_positive(&cfs_b->runtime, runtime);
 	}
 
 	/*
@@ -5065,10 +5057,9 @@ static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b)
 	if (!runtime)
 		return;
 
-	runtime = distribute_cfs_runtime(cfs_b, runtime);
+	distribute_cfs_runtime(cfs_b);
 
 	raw_spin_lock_irqsave(&cfs_b->lock, flags);
-	lsub_positive(&cfs_b->runtime, runtime);
 	cfs_b->distribute_running = 0;
 	raw_spin_unlock_irqrestore(&cfs_b->lock, flags);
 }
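
For readability, here is how distribute_cfs_runtime() reads with this patch applied, stitched together from the hunks above. Lines the diff does not show (the per-rq locking, the throttled check, and the unthrottle step) are paraphrased in comments or inferred from how the surrounding code uses them, so treat this as a sketch rather than a verbatim copy of kernel/sched/fair.c:

static void distribute_cfs_runtime(struct cfs_bandwidth *cfs_b)
{
	struct cfs_rq *cfs_rq;
	u64 runtime, remaining = 1;

	rcu_read_lock();
	list_for_each_entry_rcu(cfs_rq, &cfs_b->throttled_cfs_rq,
				throttled_list) {
		/* ... context not shown in the diff: take the cfs_rq's rq lock
		 * and skip it if it is no longer throttled ... */

		/* By the above check, this should never be true */
		SCHED_WARN_ON(cfs_rq->runtime_remaining > 0);

		/*
		 * The fix: charge the global pool under cfs_b->lock, so
		 * assign_cfs_rq_runtime() cannot spend the same runtime.
		 */
		raw_spin_lock(&cfs_b->lock);
		runtime = -cfs_rq->runtime_remaining + 1;
		if (runtime > cfs_b->runtime)
			runtime = cfs_b->runtime;
		cfs_b->runtime -= runtime;
		remaining = cfs_b->runtime;
		raw_spin_unlock(&cfs_b->lock);

		cfs_rq->runtime_remaining += runtime;

		/* ... context not shown in the diff: unthrottle the cfs_rq if it
		 * now has runtime, then drop the rq lock ... */

		if (!remaining)	/* global pool exhausted: stop distributing */
			break;
	}
	rcu_read_unlock();
}

Taking and dropping cfs_b->lock once per throttled cfs_rq is slightly more locking than before, but it makes each charge atomic with respect to assign_cfs_rq_runtime(), which also consumes cfs_b->runtime under that lock; that is exactly the race the commit message describes.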
