@@ -2876,13 +2876,13 @@ EXPORT_SYMBOL_GPL(call_rcu);

 /**
  * struct kvfree_rcu_bulk_data - single block to store kvfree_rcu() pointers
+ * @list: List node. All blocks are linked between each other
  * @nr_records: Number of active pointers in the array
- * @next: Next bulk object in the block chain
  * @records: Array of the kvfree_rcu() pointers
  */
 struct kvfree_rcu_bulk_data {
+	struct list_head list;
 	unsigned long nr_records;
-	struct kvfree_rcu_bulk_data *next;
 	void *records[];
 };

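The hunk above replaces the block's hand-rolled *next pointer with an embedded struct list_head, so the generic helpers from <linux/list.h> can manage the chain. Below is a minimal, self-contained userspace sketch of that embedding idea; it is not part of the patch, and struct bulk_data plus the simplified list_add()/list_entry() stand-ins are illustrative only.

#include <stdio.h>
#include <stddef.h>

/* Simplified stand-ins for the kernel's <linux/list.h> machinery. */
struct list_head {
	struct list_head *next, *prev;
};

#define LIST_HEAD_INIT(name)	{ &(name), &(name) }
#define list_entry(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static void list_add(struct list_head *new, struct list_head *head)
{
	/* Insert @new right after @head, i.e. at the front of the list. */
	new->next = head->next;
	new->prev = head;
	head->next->prev = new;
	head->next = new;
}

/* The block carries a generic link instead of its own *next pointer. */
struct bulk_data {
	struct list_head list;		/* replaces "struct bulk_data *next" */
	unsigned long nr_records;
};

int main(void)
{
	struct list_head head = LIST_HEAD_INIT(head);
	struct bulk_data a = { .nr_records = 3 };
	struct bulk_data b = { .nr_records = 5 };
	struct list_head *p;

	list_add(&a.list, &head);
	list_add(&b.list, &head);	/* newest block ends up first */

	for (p = head.next; p != &head; p = p->next) {
		struct bulk_data *bd = list_entry(p, struct bulk_data, list);
		printf("block with %lu records\n", bd->nr_records);
	}
	return 0;
}

Because list_entry() recovers the containing structure from the embedded node, no per-type next field is required, and the same add/splice/iterate helpers work for every structure that embeds a list_head.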
@@ -2898,21 +2898,21 @@ struct kvfree_rcu_bulk_data {
  * struct kfree_rcu_cpu_work - single batch of kfree_rcu() requests
  * @rcu_work: Let queue_rcu_work() invoke workqueue handler after grace period
  * @head_free: List of kfree_rcu() objects waiting for a grace period
- * @bkvhead_free: Bulk-List of kvfree_rcu() objects waiting for a grace period
+ * @bulk_head_free: Bulk-List of kvfree_rcu() objects waiting for a grace period
  * @krcp: Pointer to @kfree_rcu_cpu structure
  */

 struct kfree_rcu_cpu_work {
 	struct rcu_work rcu_work;
 	struct rcu_head *head_free;
-	struct kvfree_rcu_bulk_data *bkvhead_free[FREE_N_CHANNELS];
+	struct list_head bulk_head_free[FREE_N_CHANNELS];
 	struct kfree_rcu_cpu *krcp;
 };

 /**
  * struct kfree_rcu_cpu - batch up kfree_rcu() requests for RCU grace period
  * @head: List of kfree_rcu() objects not yet waiting for a grace period
- * @bkvhead: Bulk-List of kvfree_rcu() objects not yet waiting for a grace period
+ * @bulk_head: Bulk-List of kvfree_rcu() objects not yet waiting for a grace period
  * @krw_arr: Array of batches of kfree_rcu() objects waiting for a grace period
  * @lock: Synchronize access to this structure
  * @monitor_work: Promote @head to @head_free after KFREE_DRAIN_JIFFIES
@@ -2936,7 +2936,7 @@ struct kfree_rcu_cpu_work {
  */
 struct kfree_rcu_cpu {
 	struct rcu_head *head;
-	struct kvfree_rcu_bulk_data *bkvhead[FREE_N_CHANNELS];
+	struct list_head bulk_head[FREE_N_CHANNELS];
 	struct kfree_rcu_cpu_work krw_arr[KFREE_N_BATCHES];
 	raw_spinlock_t lock;
 	struct delayed_work monitor_work;
@@ -3031,12 +3031,13 @@ drain_page_cache(struct kfree_rcu_cpu *krcp)

 /*
  * This function is invoked in workqueue context after a grace period.
- * It frees all the objects queued on ->bkvhead_free or ->head_free.
+ * It frees all the objects queued on ->bulk_head_free or ->head_free.
  */
 static void kfree_rcu_work(struct work_struct *work)
 {
 	unsigned long flags;
-	struct kvfree_rcu_bulk_data *bkvhead[FREE_N_CHANNELS], *bnext;
+	struct kvfree_rcu_bulk_data *bnode, *n;
+	struct list_head bulk_head[FREE_N_CHANNELS];
 	struct rcu_head *head, *next;
 	struct kfree_rcu_cpu *krcp;
 	struct kfree_rcu_cpu_work *krwp;
@@ -3048,10 +3049,8 @@ static void kfree_rcu_work(struct work_struct *work)

 	raw_spin_lock_irqsave(&krcp->lock, flags);
 	// Channels 1 and 2.
-	for (i = 0; i < FREE_N_CHANNELS; i++) {
-		bkvhead[i] = krwp->bkvhead_free[i];
-		krwp->bkvhead_free[i] = NULL;
-	}
+	for (i = 0; i < FREE_N_CHANNELS; i++)
+		list_replace_init(&krwp->bulk_head_free[i], &bulk_head[i]);

 	// Channel 3.
 	head = krwp->head_free;
@@ -3060,36 +3059,33 @@ static void kfree_rcu_work(struct work_struct *work)

 	// Handle the first two channels.
 	for (i = 0; i < FREE_N_CHANNELS; i++) {
-		for (; bkvhead[i]; bkvhead[i] = bnext) {
-			bnext = bkvhead[i]->next;
-			debug_rcu_bhead_unqueue(bkvhead[i]);
+		list_for_each_entry_safe(bnode, n, &bulk_head[i], list) {
+			debug_rcu_bhead_unqueue(bnode);

 			rcu_lock_acquire(&rcu_callback_map);
 			if (i == 0) { // kmalloc() / kfree().
 				trace_rcu_invoke_kfree_bulk_callback(
-					rcu_state.name, bkvhead[i]->nr_records,
-					bkvhead[i]->records);
+					rcu_state.name, bnode->nr_records,
+					bnode->records);

-				kfree_bulk(bkvhead[i]->nr_records,
-					bkvhead[i]->records);
+				kfree_bulk(bnode->nr_records, bnode->records);
 			} else { // vmalloc() / vfree().
-				for (j = 0; j < bkvhead[i]->nr_records; j++) {
+				for (j = 0; j < bnode->nr_records; j++) {
 					trace_rcu_invoke_kvfree_callback(
-						rcu_state.name,
-						bkvhead[i]->records[j], 0);
+						rcu_state.name, bnode->records[j], 0);

-					vfree(bkvhead[i]->records[j]);
+					vfree(bnode->records[j]);
 				}
 			}
 			rcu_lock_release(&rcu_callback_map);

 			raw_spin_lock_irqsave(&krcp->lock, flags);
-			if (put_cached_bnode(krcp, bkvhead[i]))
-				bkvhead[i] = NULL;
+			if (put_cached_bnode(krcp, bnode))
+				bnode = NULL;
 			raw_spin_unlock_irqrestore(&krcp->lock, flags);

-			if (bkvhead[i])
-				free_page((unsigned long) bkvhead[i]);
+			if (bnode)
+				free_page((unsigned long) bnode);

 			cond_resched_tasks_rcu_qs();
 		}
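The two hunks above convert kfree_rcu_work() to a detach-then-drain pattern: each pending bulk list is moved onto an on-stack head with list_replace_init() while the lock is held, then walked and freed outside the lock with list_for_each_entry_safe(). A condensed, hypothetical sketch of that pattern follows; my_drain(), my_node and the plain spin_lock() are illustrative stand-ins (the patch itself uses raw_spin_lock_irqsave() and page-backed kvfree_rcu_bulk_data blocks).

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct my_node {
	struct list_head list;
	void *payload;
};

/* Drain all nodes queued on @shared, which is protected by @lock. */
static void my_drain(struct list_head *shared, spinlock_t *lock)
{
	LIST_HEAD(local);	/* empty on-stack list head */
	struct my_node *node, *tmp;

	/*
	 * Detach the whole chain in O(1): the entries now hang off
	 * @local and @shared is re-initialized to an empty list.
	 */
	spin_lock(lock);
	list_replace_init(shared, &local);
	spin_unlock(lock);

	/*
	 * Safe iteration: @tmp caches the next entry, so @node may be
	 * freed while the list is being walked.
	 */
	list_for_each_entry_safe(node, tmp, &local, list) {
		list_del(&node->list);
		kfree(node);
	}
}

Detaching the whole chain is O(1), so the lock hold time stays constant no matter how many blocks are queued, and the safe iterator lets each node be freed during the walk.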
@@ -3125,7 +3121,7 @@ need_offload_krc(struct kfree_rcu_cpu *krcp)
 	int i;

 	for (i = 0; i < FREE_N_CHANNELS; i++)
-		if (krcp->bkvhead[i])
+		if (!list_empty(&krcp->bulk_head[i]))
 			return true;

 	return !!krcp->head;
@@ -3162,21 +3158,20 @@ static void kfree_rcu_monitor(struct work_struct *work)
 	for (i = 0; i < KFREE_N_BATCHES; i++) {
 		struct kfree_rcu_cpu_work *krwp = &(krcp->krw_arr[i]);

-		// Try to detach bkvhead or head and attach it over any
+		// Try to detach bulk_head or head and attach it over any
 		// available corresponding free channel. It can be that
 		// a previous RCU batch is in progress, it means that
 		// immediately to queue another one is not possible so
 		// in that case the monitor work is rearmed.
-		if ((krcp->bkvhead[0] && !krwp->bkvhead_free[0]) ||
-			(krcp->bkvhead[1] && !krwp->bkvhead_free[1]) ||
+		if ((!list_empty(&krcp->bulk_head[0]) && list_empty(&krwp->bulk_head_free[0])) ||
+			(!list_empty(&krcp->bulk_head[1]) && list_empty(&krwp->bulk_head_free[1])) ||
 				(krcp->head && !krwp->head_free)) {
+
 			// Channel 1 corresponds to the SLAB-pointer bulk path.
 			// Channel 2 corresponds to vmalloc-pointer bulk path.
 			for (j = 0; j < FREE_N_CHANNELS; j++) {
-				if (!krwp->bkvhead_free[j]) {
-					krwp->bkvhead_free[j] = krcp->bkvhead[j];
-					krcp->bkvhead[j] = NULL;
-				}
+				if (list_empty(&krwp->bulk_head_free[j]))
+					list_replace_init(&krcp->bulk_head[j], &krwp->bulk_head_free[j]);
 			}

 			// Channel 3 corresponds to both SLAB and vmalloc
@@ -3288,10 +3283,11 @@ add_ptr_to_bulk_krc_lock(struct kfree_rcu_cpu **krcp,
 		return false;

 	idx = !!is_vmalloc_addr(ptr);
+	bnode = list_first_entry_or_null(&(*krcp)->bulk_head[idx],
+		struct kvfree_rcu_bulk_data, list);

 	/* Check if a new block is required. */
-	if (!(*krcp)->bkvhead[idx] ||
-		(*krcp)->bkvhead[idx]->nr_records == KVFREE_BULK_MAX_ENTR) {
+	if (!bnode || bnode->nr_records == KVFREE_BULK_MAX_ENTR) {
 		bnode = get_cached_bnode(*krcp);
 		if (!bnode && can_alloc) {
 			krc_this_cpu_unlock(*krcp, *flags);
@@ -3315,18 +3311,13 @@ add_ptr_to_bulk_krc_lock(struct kfree_rcu_cpu **krcp,
 		if (!bnode)
 			return false;

-		/* Initialize the new block. */
+		// Initialize the new block and attach it.
 		bnode->nr_records = 0;
-		bnode->next = (*krcp)->bkvhead[idx];
-
-		/* Attach it to the head. */
-		(*krcp)->bkvhead[idx] = bnode;
+		list_add(&bnode->list, &(*krcp)->bulk_head[idx]);
 	}

 	/* Finally insert. */
-	(*krcp)->bkvhead[idx]->records
-		[(*krcp)->bkvhead[idx]->nr_records++] = ptr;
-
+	bnode->records[bnode->nr_records++] = ptr;
 	return true;
 }

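The hunks above rework the insert path: instead of dereferencing (*krcp)->bkvhead[idx] directly, the code now peeks at the most recently attached block with list_first_entry_or_null() and, when there is no block or the current one is full, attaches a fresh block to the front with list_add(). A hypothetical standalone sketch of that peek-or-attach pattern is shown below; my_add_ptr(), my_node, MAX_RECORDS and the kzalloc(GFP_ATOMIC) call are illustrative (the real code draws blocks from a per-CPU page cache via get_cached_bnode()).

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/types.h>

#define MAX_RECORDS 8

struct my_node {
	struct list_head list;
	unsigned long nr_records;
	void *records[MAX_RECORDS];
};

/* Queue @ptr on the newest block of @head, attaching a new block if needed. */
static bool my_add_ptr(struct list_head *head, void *ptr)
{
	struct my_node *node;

	/* Peek at the most recently attached block, if any. */
	node = list_first_entry_or_null(head, struct my_node, list);

	/* No block yet, or the current one is full: attach a fresh one. */
	if (!node || node->nr_records == MAX_RECORDS) {
		node = kzalloc(sizeof(*node), GFP_ATOMIC);
		if (!node)
			return false;

		list_add(&node->list, head);	/* newest block goes first */
	}

	node->records[node->nr_records++] = ptr;
	return true;
}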
@@ -4761,7 +4752,7 @@ struct workqueue_struct *rcu_gp_wq;
 static void __init kfree_rcu_batch_init(void)
 {
 	int cpu;
-	int i;
+	int i, j;

 	/* Clamp it to [0:100] seconds interval. */
 	if (rcu_delay_page_cache_fill_msec < 0 ||
@@ -4781,8 +4772,14 @@ static void __init kfree_rcu_batch_init(void)
 	for (i = 0; i < KFREE_N_BATCHES; i++) {
 		INIT_RCU_WORK(&krcp->krw_arr[i].rcu_work, kfree_rcu_work);
 		krcp->krw_arr[i].krcp = krcp;
+
+		for (j = 0; j < FREE_N_CHANNELS; j++)
+			INIT_LIST_HEAD(&krcp->krw_arr[i].bulk_head_free[j]);
 	}

+	for (i = 0; i < FREE_N_CHANNELS; i++)
+		INIT_LIST_HEAD(&krcp->bulk_head[i]);
+
 	INIT_DELAYED_WORK(&krcp->monitor_work, kfree_rcu_monitor);
 	INIT_DELAYED_WORK(&krcp->page_cache_work, fill_page_cache_func);
 	krcp->initialized = true;