Skip to content

Commit 6f94856

Browse files
joelagnel authored and paulmckrcu committed
rcu/tree: Reduce wake up for synchronize_rcu() common case
In the synchronize_rcu() common case, we will have less than SR_MAX_USERS_WAKE_FROM_GP number of users per GP. Waking up the kworker is pointless just to free the last injected wait head since at that point, all the users have already been awakened. Introduce a new counter to track this and prevent the wakeup in the common case. [ paulmck: Remove atomic_dec_return_release in cannot-happen state. ] Signed-off-by: Joel Fernandes (Google) <[email protected]> Reviewed-by: Uladzislau Rezki (Sony) <[email protected]> Signed-off-by: Paul E. McKenney <[email protected]>
1 parent 32d9959 commit 6f94856

File tree

2 files changed

+27
-4
lines changed

2 files changed

+27
-4
lines changed

kernel/rcu/tree.c

Lines changed: 26 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -96,6 +96,7 @@ static struct rcu_state rcu_state = {
9696
.ofl_lock = __ARCH_SPIN_LOCK_UNLOCKED,
9797
.srs_cleanup_work = __WORK_INITIALIZER(rcu_state.srs_cleanup_work,
9898
rcu_sr_normal_gp_cleanup_work),
99+
.srs_cleanups_pending = ATOMIC_INIT(0),
99100
};
100101

101102
/* Dump rcu_node combining tree at boot to verify correct setup. */
@@ -1660,14 +1661,17 @@ static void rcu_sr_normal_gp_cleanup_work(struct work_struct *work)
16601661

16611662
rcu_sr_put_wait_head(rcu);
16621663
}
1664+
1665+
/* Order list manipulations with atomic access. */
1666+
atomic_dec_return_release(&rcu_state.srs_cleanups_pending);
16631667
}
16641668

16651669
/*
16661670
* Helper function for rcu_gp_cleanup().
16671671
*/
16681672
static void rcu_sr_normal_gp_cleanup(void)
16691673
{
1670-
struct llist_node *wait_tail, *next, *rcu;
1674+
struct llist_node *wait_tail, *next = NULL, *rcu = NULL;
16711675
int done = 0;
16721676

16731677
wait_tail = rcu_state.srs_wait_tail;
@@ -1693,16 +1697,34 @@ static void rcu_sr_normal_gp_cleanup(void)
16931697
break;
16941698
}
16951699

1696-
// concurrent sr_normal_gp_cleanup work might observe this update.
1697-
smp_store_release(&rcu_state.srs_done_tail, wait_tail);
1700+
/*
1701+
* Fast path, no more users to process except putting the second last
1702+
* wait head if no inflight-workers. If there are in-flight workers,
1703+
* they will remove the last wait head.
1704+
*
1705+
* Note that the ACQUIRE orders atomic access with list manipulation.
1706+
*/
1707+
if (wait_tail->next && wait_tail->next->next == NULL &&
1708+
rcu_sr_is_wait_head(wait_tail->next) &&
1709+
!atomic_read_acquire(&rcu_state.srs_cleanups_pending)) {
1710+
rcu_sr_put_wait_head(wait_tail->next);
1711+
wait_tail->next = NULL;
1712+
}
1713+
1714+
/* Concurrent sr_normal_gp_cleanup work might observe this update. */
16981715
ASSERT_EXCLUSIVE_WRITER(rcu_state.srs_done_tail);
1716+
smp_store_release(&rcu_state.srs_done_tail, wait_tail);
16991717

17001718
/*
17011719
* We schedule a work in order to perform a final processing
17021720
* of outstanding users(if still left) and releasing wait-heads
17031721
* added by rcu_sr_normal_gp_init() call.
17041722
*/
1705-
queue_work(sync_wq, &rcu_state.srs_cleanup_work);
1723+
if (wait_tail->next) {
1724+
atomic_inc(&rcu_state.srs_cleanups_pending);
1725+
if (!queue_work(sync_wq, &rcu_state.srs_cleanup_work))
1726+
atomic_dec(&rcu_state.srs_cleanups_pending);
1727+
}
17061728
}
17071729

17081730
/*

kernel/rcu/tree.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -420,6 +420,7 @@ struct rcu_state {
420420
struct llist_node *srs_done_tail; /* ready for GP users. */
421421
struct sr_wait_node srs_wait_nodes[SR_NORMAL_GP_WAIT_HEAD_MAX];
422422
struct work_struct srs_cleanup_work;
423+
atomic_t srs_cleanups_pending; /* srs inflight worker cleanups. */
423424
};
424425

425426
/* Values for rcu_state structure's gp_flags field. */

0 commit comments

Comments (0)