
Commit 8c15a9e

urezki authored and paulmckrcu committed
rcu/kvfree: Move bulk/list reclaim to separate functions
The kvfree_rcu() code maintains lists of pages of pointers, but also a
singly linked list, with the latter being used when memory allocation
fails. Traversal of these two types of lists is currently open coded.
This commit simplifies the code by providing kvfree_rcu_bulk() and
kvfree_rcu_list() functions, respectively, to traverse these two types
of lists.

This patch does not introduce any functional change.

Signed-off-by: Uladzislau Rezki (Sony) <[email protected]>
Signed-off-by: Paul E. McKenney <[email protected]>
1 parent 27538e1 commit 8c15a9e

File tree

1 file changed: +65 −49 lines changed

kernel/rcu/tree.c

Lines changed: 65 additions & 49 deletions

@@ -3029,6 +3029,65 @@ drain_page_cache(struct kfree_rcu_cpu *krcp)
 	return freed;
 }
 
+static void
+kvfree_rcu_bulk(struct kfree_rcu_cpu *krcp,
+	struct kvfree_rcu_bulk_data *bnode, int idx)
+{
+	unsigned long flags;
+	int i;
+
+	debug_rcu_bhead_unqueue(bnode);
+
+	rcu_lock_acquire(&rcu_callback_map);
+	if (idx == 0) { // kmalloc() / kfree().
+		trace_rcu_invoke_kfree_bulk_callback(
+			rcu_state.name, bnode->nr_records,
+			bnode->records);
+
+		kfree_bulk(bnode->nr_records, bnode->records);
+	} else { // vmalloc() / vfree().
+		for (i = 0; i < bnode->nr_records; i++) {
+			trace_rcu_invoke_kvfree_callback(
+				rcu_state.name, bnode->records[i], 0);
+
+			vfree(bnode->records[i]);
+		}
+	}
+	rcu_lock_release(&rcu_callback_map);
+
+	raw_spin_lock_irqsave(&krcp->lock, flags);
+	if (put_cached_bnode(krcp, bnode))
+		bnode = NULL;
+	raw_spin_unlock_irqrestore(&krcp->lock, flags);
+
+	if (bnode)
+		free_page((unsigned long) bnode);
+
+	cond_resched_tasks_rcu_qs();
+}
+
+static void
+kvfree_rcu_list(struct rcu_head *head)
+{
+	struct rcu_head *next;
+
+	for (; head; head = next) {
+		void *ptr = (void *) head->func;
+		unsigned long offset = (void *) head - ptr;
+
+		next = head->next;
+		debug_rcu_head_unqueue((struct rcu_head *)ptr);
+		rcu_lock_acquire(&rcu_callback_map);
+		trace_rcu_invoke_kvfree_callback(rcu_state.name, head, offset);
+
+		if (!WARN_ON_ONCE(!__is_kvfree_rcu_offset(offset)))
+			kvfree(ptr);
+
+		rcu_lock_release(&rcu_callback_map);
+		cond_resched_tasks_rcu_qs();
+	}
+}
+
 /*
  * This function is invoked in workqueue context after a grace period.
  * It frees all the objects queued on ->bulk_head_free or ->head_free.
@@ -3038,10 +3097,10 @@ static void kfree_rcu_work(struct work_struct *work)
 	unsigned long flags;
 	struct kvfree_rcu_bulk_data *bnode, *n;
 	struct list_head bulk_head[FREE_N_CHANNELS];
-	struct rcu_head *head, *next;
+	struct rcu_head *head;
 	struct kfree_rcu_cpu *krcp;
 	struct kfree_rcu_cpu_work *krwp;
-	int i, j;
+	int i;
 
 	krwp = container_of(to_rcu_work(work),
 			    struct kfree_rcu_cpu_work, rcu_work);
@@ -3058,38 +3117,9 @@ static void kfree_rcu_work(struct work_struct *work)
 	raw_spin_unlock_irqrestore(&krcp->lock, flags);
 
 	// Handle the first two channels.
-	for (i = 0; i < FREE_N_CHANNELS; i++) {
-		list_for_each_entry_safe(bnode, n, &bulk_head[i], list) {
-			debug_rcu_bhead_unqueue(bnode);
-
-			rcu_lock_acquire(&rcu_callback_map);
-			if (i == 0) { // kmalloc() / kfree().
-				trace_rcu_invoke_kfree_bulk_callback(
-					rcu_state.name, bnode->nr_records,
-					bnode->records);
-
-				kfree_bulk(bnode->nr_records, bnode->records);
-			} else { // vmalloc() / vfree().
-				for (j = 0; j < bnode->nr_records; j++) {
-					trace_rcu_invoke_kvfree_callback(
-						rcu_state.name, bnode->records[j], 0);
-
-					vfree(bnode->records[j]);
-				}
-			}
-			rcu_lock_release(&rcu_callback_map);
-
-			raw_spin_lock_irqsave(&krcp->lock, flags);
-			if (put_cached_bnode(krcp, bnode))
-				bnode = NULL;
-			raw_spin_unlock_irqrestore(&krcp->lock, flags);
-
-			if (bnode)
-				free_page((unsigned long) bnode);
-
-			cond_resched_tasks_rcu_qs();
-		}
-	}
+	for (i = 0; i < FREE_N_CHANNELS; i++)
+		list_for_each_entry_safe(bnode, n, &bulk_head[i], list)
+			kvfree_rcu_bulk(krcp, bnode, i);
 
 	/*
 	 * This is used when the "bulk" path can not be used for the
@@ -3098,21 +3128,7 @@ static void kfree_rcu_work(struct work_struct *work)
 	 * queued on a linked list through their rcu_head structures.
 	 * This list is named "Channel 3".
 	 */
-	for (; head; head = next) {
-		void *ptr = (void *) head->func;
-		unsigned long offset = (void *) head - ptr;
-
-		next = head->next;
-		debug_rcu_head_unqueue((struct rcu_head *)ptr);
-		rcu_lock_acquire(&rcu_callback_map);
-		trace_rcu_invoke_kvfree_callback(rcu_state.name, head, offset);
-
-		if (!WARN_ON_ONCE(!__is_kvfree_rcu_offset(offset)))
-			kvfree(ptr);
-
-		rcu_lock_release(&rcu_callback_map);
-		cond_resched_tasks_rcu_qs();
-	}
+	kvfree_rcu_list(head);
 }
 
 static bool
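
For context on the pointer arithmetic inside the new kvfree_rcu_list(): on the
allocation-failure path the object's embedded rcu_head is chained onto the
"Channel 3" list with its ->func field carrying a pointer back to the start of
the enclosing object, so the head's offset within that object can be recomputed
and sanity-checked by __is_kvfree_rcu_offset() before kvfree() releases it.
Below is a minimal userspace sketch of that decode step only; struct
fake_rcu_head and struct my_obj are made-up stand-ins for illustration, not the
kernel's definitions.

#include <stdio.h>

/* Made-up stand-ins: the kernel's struct rcu_head keeps a function pointer
 * in ->func, which the fallback path reuses to carry the object pointer. */
struct fake_rcu_head {
	struct fake_rcu_head *next;
	void *func;
};

struct my_obj {
	long payload;
	struct fake_rcu_head rh;	/* embedded head at a small, nonzero offset */
};

int main(void)
{
	struct my_obj obj = { .payload = 42 };
	struct fake_rcu_head *head = &obj.rh;

	/* Enqueue side (sketch of the fallback path): stash the object
	 * pointer in ->func and chain the head onto the list. */
	head->func = &obj;
	head->next = NULL;

	/* Dequeue side: the same arithmetic kvfree_rcu_list() performs. */
	void *ptr = head->func;					/* original object */
	unsigned long offset = (char *)head - (char *)ptr;	/* offset of rh within my_obj */

	printf("payload = %ld, head offset = %lu bytes\n",
	       ((struct my_obj *)ptr)->payload, offset);
	return 0;
}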
