@@ -3029,6 +3029,65 @@ drain_page_cache(struct kfree_rcu_cpu *krcp)
 	return freed;
 }
 
+static void
+kvfree_rcu_bulk(struct kfree_rcu_cpu *krcp,
+	struct kvfree_rcu_bulk_data *bnode, int idx)
+{
+	unsigned long flags;
+	int i;
+
+	debug_rcu_bhead_unqueue(bnode);
+
+	rcu_lock_acquire(&rcu_callback_map);
+	if (idx == 0) { // kmalloc() / kfree().
+		trace_rcu_invoke_kfree_bulk_callback(
+			rcu_state.name, bnode->nr_records,
+			bnode->records);
+
+		kfree_bulk(bnode->nr_records, bnode->records);
+	} else { // vmalloc() / vfree().
+		for (i = 0; i < bnode->nr_records; i++) {
+			trace_rcu_invoke_kvfree_callback(
+				rcu_state.name, bnode->records[i], 0);
+
+			vfree(bnode->records[i]);
+		}
+	}
+	rcu_lock_release(&rcu_callback_map);
+
+	raw_spin_lock_irqsave(&krcp->lock, flags);
+	if (put_cached_bnode(krcp, bnode))
+		bnode = NULL;
+	raw_spin_unlock_irqrestore(&krcp->lock, flags);
+
+	if (bnode)
+		free_page((unsigned long) bnode);
+
+	cond_resched_tasks_rcu_qs();
+}
+
+static void
+kvfree_rcu_list(struct rcu_head *head)
+{
+	struct rcu_head *next;
+
+	for (; head; head = next) {
+		void *ptr = (void *) head->func;
+		unsigned long offset = (void *) head - ptr;
+
+		next = head->next;
+		debug_rcu_head_unqueue((struct rcu_head *)ptr);
+		rcu_lock_acquire(&rcu_callback_map);
+		trace_rcu_invoke_kvfree_callback(rcu_state.name, head, offset);
+
+		if (!WARN_ON_ONCE(!__is_kvfree_rcu_offset(offset)))
+			kvfree(ptr);
+
+		rcu_lock_release(&rcu_callback_map);
+		cond_resched_tasks_rcu_qs();
+	}
+}
+
 /*
  * This function is invoked in workqueue context after a grace period.
  * It frees all the objects queued on ->bulk_head_free or ->head_free.
@@ -3038,10 +3097,10 @@ static void kfree_rcu_work(struct work_struct *work)
 	unsigned long flags;
 	struct kvfree_rcu_bulk_data *bnode, *n;
 	struct list_head bulk_head[FREE_N_CHANNELS];
-	struct rcu_head *head, *next;
+	struct rcu_head *head;
 	struct kfree_rcu_cpu *krcp;
 	struct kfree_rcu_cpu_work *krwp;
-	int i, j;
+	int i;
 
 	krwp = container_of(to_rcu_work(work),
 			    struct kfree_rcu_cpu_work, rcu_work);
@@ -3058,38 +3117,9 @@ static void kfree_rcu_work(struct work_struct *work)
 	raw_spin_unlock_irqrestore(&krcp->lock, flags);
 
 	// Handle the first two channels.
-	for (i = 0; i < FREE_N_CHANNELS; i++) {
-		list_for_each_entry_safe(bnode, n, &bulk_head[i], list) {
-			debug_rcu_bhead_unqueue(bnode);
-
-			rcu_lock_acquire(&rcu_callback_map);
-			if (i == 0) { // kmalloc() / kfree().
-				trace_rcu_invoke_kfree_bulk_callback(
-					rcu_state.name, bnode->nr_records,
-					bnode->records);
-
-				kfree_bulk(bnode->nr_records, bnode->records);
-			} else { // vmalloc() / vfree().
-				for (j = 0; j < bnode->nr_records; j++) {
-					trace_rcu_invoke_kvfree_callback(
-						rcu_state.name, bnode->records[j], 0);
-
-					vfree(bnode->records[j]);
-				}
-			}
-			rcu_lock_release(&rcu_callback_map);
-
-			raw_spin_lock_irqsave(&krcp->lock, flags);
-			if (put_cached_bnode(krcp, bnode))
-				bnode = NULL;
-			raw_spin_unlock_irqrestore(&krcp->lock, flags);
-
-			if (bnode)
-				free_page((unsigned long) bnode);
-
-			cond_resched_tasks_rcu_qs();
-		}
-	}
+	for (i = 0; i < FREE_N_CHANNELS; i++)
+		list_for_each_entry_safe(bnode, n, &bulk_head[i], list)
+			kvfree_rcu_bulk(krcp, bnode, i);
 
 	/*
 	 * This is used when the "bulk" path can not be used for the
@@ -3098,21 +3128,7 @@ static void kfree_rcu_work(struct work_struct *work)
 	 * queued on a linked list through their rcu_head structures.
 	 * This list is named "Channel 3".
 	 */
-	for (; head; head = next) {
-		void *ptr = (void *) head->func;
-		unsigned long offset = (void *) head - ptr;
-
-		next = head->next;
-		debug_rcu_head_unqueue((struct rcu_head *)ptr);
-		rcu_lock_acquire(&rcu_callback_map);
-		trace_rcu_invoke_kvfree_callback(rcu_state.name, head, offset);
-
-		if (!WARN_ON_ONCE(!__is_kvfree_rcu_offset(offset)))
-			kvfree(ptr);
-
-		rcu_lock_release(&rcu_callback_map);
-		cond_resched_tasks_rcu_qs();
-	}
+	kvfree_rcu_list(head);
 }
 
 static bool
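
For context on the "Channel 3" path that the new kvfree_rcu_list() takes over: head->func records the start of the enclosing object, so subtracting it from the rcu_head address yields the offset of the embedded head, which __is_kvfree_rcu_offset() sanity-checks before kvfree(). Below is a minimal userspace sketch of that recovery, with invented toy_* names standing in for the kernel structures; it is an illustration of the pointer/offset arithmetic, not kernel code.

    #include <stdio.h>
    #include <stdlib.h>

    /* Toy stand-ins for the kernel structures (names are hypothetical). */
    struct toy_rcu_head {
    	struct toy_rcu_head *next;
    	void *func;		/* records the start of the enclosing object */
    };

    struct toy_obj {
    	long payload[8];
    	struct toy_rcu_head rh;	/* embedded head, like rcu_head in kernel objects */
    };

    int main(void)
    {
    	struct toy_obj *obj = malloc(sizeof(*obj));

    	/* Queueing side: stash the object's start address in the head. */
    	obj->rh.func = obj;

    	/* Reclaim side, mirroring kvfree_rcu_list(): recover the object
    	 * pointer, derive the offset of the embedded head, and sanity-check
    	 * it before freeing. */
    	void *ptr = obj->rh.func;
    	unsigned long offset = (unsigned long)((char *)&obj->rh - (char *)ptr);

    	printf("embedded head lives %lu bytes into the object\n", offset);

    	if (offset < 4096)	/* stand-in for __is_kvfree_rcu_offset(offset) */
    		free(ptr);

    	return 0;
    }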