Skip to content

Commit 569d767

Browse files
joelagnel authored and paulmckrcu committed
rcu: Make kfree_rcu() use a non-atomic ->monitor_todo
Because the ->monitor_todo field is always protected by krcp->lock, this commit downgrades from xchg() to non-atomic unmarked assignment statements.

Signed-off-by: Joel Fernandes <[email protected]>
[ paulmck: Update to include early-boot kick code. ]
Signed-off-by: Paul E. McKenney <[email protected]>
1 parent e6e78b0 commit 569d767

File tree

1 file changed

+10
-6
lines changed

1 file changed

+10
-6
lines changed

kernel/rcu/tree.c

Lines changed: 10 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -2708,7 +2708,7 @@ struct kfree_rcu_cpu {
27082708
struct rcu_head *head_free;
27092709
spinlock_t lock;
27102710
struct delayed_work monitor_work;
2711-
int monitor_todo;
2711+
bool monitor_todo;
27122712
bool initialized;
27132713
};
27142714

@@ -2765,15 +2765,16 @@ static inline void kfree_rcu_drain_unlock(struct kfree_rcu_cpu *krcp,
27652765
unsigned long flags)
27662766
{
27672767
// Attempt to start a new batch.
2768+
krcp->monitor_todo = false;
27682769
if (queue_kfree_rcu_work(krcp)) {
27692770
// Success! Our job is done here.
27702771
spin_unlock_irqrestore(&krcp->lock, flags);
27712772
return;
27722773
}
27732774

27742775
// Previous RCU batch still in progress, try again later.
2775-
if (!xchg(&krcp->monitor_todo, true))
2776-
schedule_delayed_work(&krcp->monitor_work, KFREE_DRAIN_JIFFIES);
2776+
krcp->monitor_todo = true;
2777+
schedule_delayed_work(&krcp->monitor_work, KFREE_DRAIN_JIFFIES);
27772778
spin_unlock_irqrestore(&krcp->lock, flags);
27782779
}
27792780

@@ -2788,7 +2789,7 @@ static void kfree_rcu_monitor(struct work_struct *work)
27882789
monitor_work.work);
27892790

27902791
spin_lock_irqsave(&krcp->lock, flags);
2791-
if (xchg(&krcp->monitor_todo, false))
2792+
if (krcp->monitor_todo)
27922793
kfree_rcu_drain_unlock(krcp, flags);
27932794
else
27942795
spin_unlock_irqrestore(&krcp->lock, flags);
@@ -2837,8 +2838,10 @@ void kfree_call_rcu(struct rcu_head *head, rcu_callback_t func)
28372838

28382839
// Set timer to drain after KFREE_DRAIN_JIFFIES.
28392840
if (rcu_scheduler_active == RCU_SCHEDULER_RUNNING &&
2840-
!xchg(&krcp->monitor_todo, true))
2841+
!krcp->monitor_todo) {
2842+
krcp->monitor_todo = true;
28412843
schedule_delayed_work(&krcp->monitor_work, KFREE_DRAIN_JIFFIES);
2844+
}
28422845

28432846
if (krcp->initialized)
28442847
spin_unlock(&krcp->lock);
@@ -2855,10 +2858,11 @@ void __init kfree_rcu_scheduler_running(void)
28552858
struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu);
28562859

28572860
spin_lock_irqsave(&krcp->lock, flags);
2858-
if (!krcp->head || xchg(&krcp->monitor_todo, true)) {
2861+
if (!krcp->head || krcp->monitor_todo) {
28592862
spin_unlock_irqrestore(&krcp->lock, flags);
28602863
continue;
28612864
}
2865+
krcp->monitor_todo = true;
28622866
schedule_delayed_work(&krcp->monitor_work, KFREE_DRAIN_JIFFIES);
28632867
spin_unlock_irqrestore(&krcp->lock, flags);
28642868
}

0 commit comments

Comments
 (0)