
Commit 27538e1

urezki authored and paulmckrcu committed
rcu/kvfree: Switch to a generic linked list API
This commit improves the readability and maintainability of the kvfree_rcu() code by switching from an open-coded linked list to the standard Linux-kernel circular doubly linked list.

This patch does not introduce any functional change.

Signed-off-by: Uladzislau Rezki (Sony) <[email protected]>
Signed-off-by: Paul E. McKenney <[email protected]>
1 parent 04a522b commit 27538e1
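
For readers who have not worked with either style, the sketch below contrasts the two approaches named in the commit message. It is a minimal userspace re-implementation, not the kernel's <linux/list.h>: the struct names old_block and new_block are invented for illustration, and the helper bodies mirror the kernel versions minus kernel-only annotations such as WRITE_ONCE().

#include <stdio.h>

/* Old style: an open-coded, NULL-terminated singly linked chain. */
struct old_block {
        int nr_records;
        struct old_block *next;         /* hand-rolled link */
};

/* New style: a generic node embedded into the payload structure. */
struct list_head {
        struct list_head *next, *prev;  /* circular, doubly linked */
};

struct new_block {
        struct list_head list;          /* the helpers below only touch this */
        int nr_records;
};

static void INIT_LIST_HEAD(struct list_head *h)
{
        h->next = h->prev = h;          /* empty list: head points at itself */
}

static void list_add(struct list_head *n, struct list_head *h)
{
        /* Insert n right after h, i.e. at the front of the list. */
        n->next = h->next;
        n->prev = h;
        h->next->prev = n;
        h->next = n;
}

static int list_empty(const struct list_head *h)
{
        return h->next == h;            /* one comparison, no NULL states */
}

int main(void)
{
        struct list_head head;
        struct new_block b1 = { .nr_records = 1 }, b2 = { .nr_records = 2 };

        INIT_LIST_HEAD(&head);
        list_add(&b1.list, &head);      /* head -> b1 */
        list_add(&b2.list, &head);      /* head -> b2 -> b1, newest first */
        printf("empty after adds: %d\n", list_empty(&head));   /* prints 0 */
        return 0;
}

The payoff, visible throughout the diff below, is that insertion, O(1) whole-list detachment, emptiness tests, and deletion-safe traversal all come from one shared, widely audited API instead of per-site pointer surgery.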

File tree: 1 file changed, +43 -46 lines

kernel/rcu/tree.c (43 additions, 46 deletions)
@@ -2876,13 +2876,13 @@ EXPORT_SYMBOL_GPL(call_rcu);
 
 /**
  * struct kvfree_rcu_bulk_data - single block to store kvfree_rcu() pointers
+ * @list: List node. All blocks are linked between each other
  * @nr_records: Number of active pointers in the array
- * @next: Next bulk object in the block chain
  * @records: Array of the kvfree_rcu() pointers
  */
 struct kvfree_rcu_bulk_data {
+        struct list_head list;
         unsigned long nr_records;
-        struct kvfree_rcu_bulk_data *next;
         void *records[];
 };
 
@@ -2898,21 +2898,21 @@ struct kvfree_rcu_bulk_data {
  * struct kfree_rcu_cpu_work - single batch of kfree_rcu() requests
  * @rcu_work: Let queue_rcu_work() invoke workqueue handler after grace period
  * @head_free: List of kfree_rcu() objects waiting for a grace period
- * @bkvhead_free: Bulk-List of kvfree_rcu() objects waiting for a grace period
+ * @bulk_head_free: Bulk-List of kvfree_rcu() objects waiting for a grace period
  * @krcp: Pointer to @kfree_rcu_cpu structure
  */
 
 struct kfree_rcu_cpu_work {
         struct rcu_work rcu_work;
         struct rcu_head *head_free;
-        struct kvfree_rcu_bulk_data *bkvhead_free[FREE_N_CHANNELS];
+        struct list_head bulk_head_free[FREE_N_CHANNELS];
         struct kfree_rcu_cpu *krcp;
 };
 
 /**
  * struct kfree_rcu_cpu - batch up kfree_rcu() requests for RCU grace period
  * @head: List of kfree_rcu() objects not yet waiting for a grace period
- * @bkvhead: Bulk-List of kvfree_rcu() objects not yet waiting for a grace period
+ * @bulk_head: Bulk-List of kvfree_rcu() objects not yet waiting for a grace period
  * @krw_arr: Array of batches of kfree_rcu() objects waiting for a grace period
  * @lock: Synchronize access to this structure
  * @monitor_work: Promote @head to @head_free after KFREE_DRAIN_JIFFIES
@@ -2936,7 +2936,7 @@ struct kfree_rcu_cpu_work {
  */
 struct kfree_rcu_cpu {
         struct rcu_head *head;
-        struct kvfree_rcu_bulk_data *bkvhead[FREE_N_CHANNELS];
+        struct list_head bulk_head[FREE_N_CHANNELS];
         struct kfree_rcu_cpu_work krw_arr[KFREE_N_BATCHES];
         raw_spinlock_t lock;
         struct delayed_work monitor_work;
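
The three struct changes above are the core of the conversion: the per-block next pointer becomes an embedded struct list_head, and the bkvhead/bkvhead_free pointer arrays become list_head anchors. The list code only ever sees the embedded member; the containing block is recovered with list_entry(), the kernel's wrapper around container_of(). A self-contained userspace sketch of that recovery step (simplified field layout, invented values):

#include <stdio.h>
#include <stddef.h>

struct list_head { struct list_head *next, *prev; };

/* Map a pointer to an embedded member back to its containing struct. */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))
#define list_entry(ptr, type, member) container_of(ptr, type, member)

struct bulk_data {
        struct list_head list;          /* embedded node, as in the patch */
        unsigned long nr_records;
};

int main(void)
{
        struct bulk_data b = { .nr_records = 7 };
        struct list_head *node = &b.list;       /* what list helpers hand back */

        struct bulk_data *owner = list_entry(node, struct bulk_data, list);
        printf("%lu\n", owner->nr_records);     /* prints 7 */
        return 0;
}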
@@ -3031,12 +3031,13 @@ drain_page_cache(struct kfree_rcu_cpu *krcp)
 
 /*
  * This function is invoked in workqueue context after a grace period.
- * It frees all the objects queued on ->bkvhead_free or ->head_free.
+ * It frees all the objects queued on ->bulk_head_free or ->head_free.
  */
 static void kfree_rcu_work(struct work_struct *work)
 {
         unsigned long flags;
-        struct kvfree_rcu_bulk_data *bkvhead[FREE_N_CHANNELS], *bnext;
+        struct kvfree_rcu_bulk_data *bnode, *n;
+        struct list_head bulk_head[FREE_N_CHANNELS];
         struct rcu_head *head, *next;
         struct kfree_rcu_cpu *krcp;
         struct kfree_rcu_cpu_work *krwp;
@@ -3048,10 +3049,8 @@ static void kfree_rcu_work(struct work_struct *work)
 
         raw_spin_lock_irqsave(&krcp->lock, flags);
         // Channels 1 and 2.
-        for (i = 0; i < FREE_N_CHANNELS; i++) {
-                bkvhead[i] = krwp->bkvhead_free[i];
-                krwp->bkvhead_free[i] = NULL;
-        }
+        for (i = 0; i < FREE_N_CHANNELS; i++)
+                list_replace_init(&krwp->bulk_head_free[i], &bulk_head[i]);
 
         // Channel 3.
         head = krwp->head_free;
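
The removed loop body copied one pointer per channel and NULLed the source; its replacement, list_replace_init(), detaches the whole channel onto the on-stack bulk_head[i] in O(1) while the lock is held, leaving the source head empty and immediately reusable. The same helper reappears in kfree_rcu_monitor() further down. A userspace sketch of the semantics; the real helper lives in <linux/list.h>:

#include <assert.h>

struct list_head { struct list_head *next, *prev; };

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }

static void list_add(struct list_head *n, struct list_head *h)
{
        n->next = h->next;
        n->prev = h;
        h->next->prev = n;
        h->next = n;
}

static int list_empty(const struct list_head *h) { return h->next == h; }

/* Make "new" take over "old"'s position, then mark "old" empty again. */
static void list_replace_init(struct list_head *old, struct list_head *new)
{
        new->next = old->next;
        new->next->prev = new;
        new->prev = old->prev;
        new->prev->next = new;
        INIT_LIST_HEAD(old);
}

int main(void)
{
        struct list_head src, dst, n1, n2;

        INIT_LIST_HEAD(&src);
        list_add(&n1, &src);
        list_add(&n2, &src);

        list_replace_init(&src, &dst);  /* O(1), regardless of list length */
        assert(list_empty(&src));       /* source is empty and reusable */
        assert(!list_empty(&dst));      /* dst now anchors n2 -> n1 */
        return 0;
}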
@@ -3060,36 +3059,33 @@ static void kfree_rcu_work(struct work_struct *work)
 
         // Handle the first two channels.
         for (i = 0; i < FREE_N_CHANNELS; i++) {
-                for (; bkvhead[i]; bkvhead[i] = bnext) {
-                        bnext = bkvhead[i]->next;
-                        debug_rcu_bhead_unqueue(bkvhead[i]);
+                list_for_each_entry_safe(bnode, n, &bulk_head[i], list) {
+                        debug_rcu_bhead_unqueue(bnode);
 
                         rcu_lock_acquire(&rcu_callback_map);
                         if (i == 0) { // kmalloc() / kfree().
                                 trace_rcu_invoke_kfree_bulk_callback(
-                                        rcu_state.name, bkvhead[i]->nr_records,
-                                        bkvhead[i]->records);
+                                        rcu_state.name, bnode->nr_records,
+                                        bnode->records);
 
-                                kfree_bulk(bkvhead[i]->nr_records,
-                                        bkvhead[i]->records);
+                                kfree_bulk(bnode->nr_records, bnode->records);
                         } else { // vmalloc() / vfree().
-                                for (j = 0; j < bkvhead[i]->nr_records; j++) {
+                                for (j = 0; j < bnode->nr_records; j++) {
                                         trace_rcu_invoke_kvfree_callback(
-                                                rcu_state.name,
-                                                bkvhead[i]->records[j], 0);
+                                                rcu_state.name, bnode->records[j], 0);
 
-                                        vfree(bkvhead[i]->records[j]);
+                                        vfree(bnode->records[j]);
                                 }
                         }
                         rcu_lock_release(&rcu_callback_map);
 
                         raw_spin_lock_irqsave(&krcp->lock, flags);
-                        if (put_cached_bnode(krcp, bkvhead[i]))
-                                bkvhead[i] = NULL;
+                        if (put_cached_bnode(krcp, bnode))
+                                bnode = NULL;
                         raw_spin_unlock_irqrestore(&krcp->lock, flags);
 
-                        if (bkvhead[i])
-                                free_page((unsigned long) bkvhead[i]);
+                        if (bnode)
+                                free_page((unsigned long) bnode);
 
                         cond_resched_tasks_rcu_qs();
                 }
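
The traversal above uses the _safe flavor because each iteration hands bnode back to the page cache or frees it: the macro caches the next entry in n before the body runs, so removing the current entry cannot break the walk. A compilable sketch of the macro (as in <linux/list.h>, using GNU C typeof) and the free-while-iterating pattern, with invented struct and variable names:

#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>

struct list_head { struct list_head *next, *prev; };

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))
#define list_entry(ptr, type, member) container_of(ptr, type, member)

/* Safe variant: "n" holds the next entry, so the body may free "pos". */
#define list_for_each_entry_safe(pos, n, head, member)                  \
        for (pos = list_entry((head)->next, typeof(*pos), member),      \
             n = list_entry(pos->member.next, typeof(*pos), member);    \
             &pos->member != (head);                                    \
             pos = n, n = list_entry(n->member.next, typeof(*n), member))

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }

static void list_add(struct list_head *n, struct list_head *h)
{
        n->next = h->next;
        n->prev = h;
        h->next->prev = n;
        h->next = n;
}

struct block {
        struct list_head list;
        int id;
};

int main(void)
{
        struct list_head head;
        struct block *b, *tmp;
        int i;

        INIT_LIST_HEAD(&head);
        for (i = 0; i < 3; i++) {
                b = malloc(sizeof(*b));
                if (!b)
                        return 1;
                b->id = i;
                list_add(&b->list, &head);
        }

        list_for_each_entry_safe(b, tmp, &head, list) {
                printf("freeing block %d\n", b->id);    /* 2, then 1, then 0 */
                free(b);        /* safe: next entry already cached in tmp */
        }
        return 0;
}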
@@ -3125,7 +3121,7 @@ need_offload_krc(struct kfree_rcu_cpu *krcp)
         int i;
 
         for (i = 0; i < FREE_N_CHANNELS; i++)
-                if (krcp->bkvhead[i])
+                if (!list_empty(&krcp->bulk_head[i]))
                         return true;
 
         return !!krcp->head;
@@ -3162,21 +3158,20 @@ static void kfree_rcu_monitor(struct work_struct *work)
         for (i = 0; i < KFREE_N_BATCHES; i++) {
                 struct kfree_rcu_cpu_work *krwp = &(krcp->krw_arr[i]);
 
-                // Try to detach bkvhead or head and attach it over any
+                // Try to detach bulk_head or head and attach it over any
                 // available corresponding free channel. It can be that
                 // a previous RCU batch is in progress, it means that
                 // immediately to queue another one is not possible so
                 // in that case the monitor work is rearmed.
-                if ((krcp->bkvhead[0] && !krwp->bkvhead_free[0]) ||
-                        (krcp->bkvhead[1] && !krwp->bkvhead_free[1]) ||
+                if ((!list_empty(&krcp->bulk_head[0]) && list_empty(&krwp->bulk_head_free[0])) ||
+                        (!list_empty(&krcp->bulk_head[1]) && list_empty(&krwp->bulk_head_free[1])) ||
                                 (krcp->head && !krwp->head_free)) {
+
                         // Channel 1 corresponds to the SLAB-pointer bulk path.
                         // Channel 2 corresponds to vmalloc-pointer bulk path.
                         for (j = 0; j < FREE_N_CHANNELS; j++) {
-                                if (!krwp->bkvhead_free[j]) {
-                                        krwp->bkvhead_free[j] = krcp->bkvhead[j];
-                                        krcp->bkvhead[j] = NULL;
-                                }
+                                if (list_empty(&krwp->bulk_head_free[j]))
+                                        list_replace_init(&krcp->bulk_head[j], &krwp->bulk_head_free[j]);
                         }
 
                         // Channel 3 corresponds to both SLAB and vmalloc
@@ -3288,10 +3283,11 @@ add_ptr_to_bulk_krc_lock(struct kfree_rcu_cpu **krcp,
                 return false;
 
         idx = !!is_vmalloc_addr(ptr);
+        bnode = list_first_entry_or_null(&(*krcp)->bulk_head[idx],
+                struct kvfree_rcu_bulk_data, list);
 
         /* Check if a new block is required. */
-        if (!(*krcp)->bkvhead[idx] ||
-                (*krcp)->bkvhead[idx]->nr_records == KVFREE_BULK_MAX_ENTR) {
+        if (!bnode || bnode->nr_records == KVFREE_BULK_MAX_ENTR) {
                 bnode = get_cached_bnode(*krcp);
                 if (!bnode && can_alloc) {
                         krc_this_cpu_unlock(*krcp, *flags);
@@ -3315,18 +3311,13 @@ add_ptr_to_bulk_krc_lock(struct kfree_rcu_cpu **krcp,
                 if (!bnode)
                         return false;
 
-                /* Initialize the new block. */
+                // Initialize the new block and attach it.
                 bnode->nr_records = 0;
-                bnode->next = (*krcp)->bkvhead[idx];
-
-                /* Attach it to the head. */
-                (*krcp)->bkvhead[idx] = bnode;
+                list_add(&bnode->list, &(*krcp)->bulk_head[idx]);
         }
 
         /* Finally insert. */
-        (*krcp)->bkvhead[idx]->records
-                [(*krcp)->bkvhead[idx]->nr_records++] = ptr;
-
+        bnode->records[bnode->nr_records++] = ptr;
         return true;
 }
 
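The insert path above asks for the current front block via list_first_entry_or_null() (NULL when the channel is empty) and, when a fresh block is needed, list_add() puts it at the front, preserving the invariant that the partially filled block is always first. A sketch of that access pattern; the macro here is simplified (the kernel version also uses READ_ONCE() on head->next), and struct block, MAX_ENTR, and the variable names are invented:

#include <assert.h>
#include <stddef.h>

struct list_head { struct list_head *next, *prev; };

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))
#define list_entry(ptr, type, member) container_of(ptr, type, member)

/* Simplified stand-in for the <linux/list.h> helper. */
#define list_first_entry_or_null(head, type, member) \
        ((head)->next != (head) ? list_entry((head)->next, type, member) : NULL)

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }

static void list_add(struct list_head *n, struct list_head *h)
{
        n->next = h->next;
        n->prev = h;
        h->next->prev = n;
        h->next = n;
}

#define MAX_ENTR 2      /* stand-in for KVFREE_BULK_MAX_ENTR */

struct block {
        struct list_head list;
        int nr_records;
};

int main(void)
{
        struct list_head head;
        struct block a = { .nr_records = MAX_ENTR };    /* a full block */
        struct block fresh = { .nr_records = 0 };
        struct block *b;

        INIT_LIST_HEAD(&head);
        b = list_first_entry_or_null(&head, struct block, list);
        assert(b == NULL);              /* empty channel: need a new block */

        list_add(&a.list, &head);
        b = list_first_entry_or_null(&head, struct block, list);
        assert(b == &a && b->nr_records == MAX_ENTR);   /* full: new block */

        list_add(&fresh.list, &head);   /* fresh block becomes the front */
        b = list_first_entry_or_null(&head, struct block, list);
        assert(b == &fresh);            /* inserts now land in "fresh" */
        return 0;
}
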
@@ -4761,7 +4752,7 @@ struct workqueue_struct *rcu_gp_wq;
 static void __init kfree_rcu_batch_init(void)
 {
         int cpu;
-        int i;
+        int i, j;
 
         /* Clamp it to [0:100] seconds interval. */
         if (rcu_delay_page_cache_fill_msec < 0 ||
@@ -4781,8 +4772,14 @@ static void __init kfree_rcu_batch_init(void)
         for (i = 0; i < KFREE_N_BATCHES; i++) {
                 INIT_RCU_WORK(&krcp->krw_arr[i].rcu_work, kfree_rcu_work);
                 krcp->krw_arr[i].krcp = krcp;
+
+                for (j = 0; j < FREE_N_CHANNELS; j++)
+                        INIT_LIST_HEAD(&krcp->krw_arr[i].bulk_head_free[j]);
         }
 
+        for (i = 0; i < FREE_N_CHANNELS; i++)
+                INIT_LIST_HEAD(&krcp->bulk_head[i]);
+
         INIT_DELAYED_WORK(&krcp->monitor_work, kfree_rcu_monitor);
         INIT_DELAYED_WORK(&krcp->page_cache_work, fill_page_cache_func);
         krcp->initialized = true;
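
The new INIT_LIST_HEAD() calls in kfree_rcu_batch_init() are not optional: the old code could rely on zero-initialized per-CPU pointers reading as "empty", but a zero-filled list_head is not an empty list; it must point at itself before list_empty() and list_add() behave. A small sketch of the distinction:

#include <assert.h>
#include <string.h>

struct list_head { struct list_head *next, *prev; };

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }
static int list_empty(const struct list_head *h) { return h->next == h; }

int main(void)
{
        struct list_head h;

        memset(&h, 0, sizeof(h));       /* like zeroed static/per-CPU memory */
        assert(!list_empty(&h));        /* h.next == NULL, not &h: NOT empty */
                                        /* list_add() here would deref NULL */

        INIT_LIST_HEAD(&h);
        assert(list_empty(&h));         /* now a valid empty list */
        return 0;
}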
