@@ -2970,13 +2970,11 @@ EXPORT_SYMBOL_GPL(call_rcu);
  * @nr_records: Number of active pointers in the array
  * @records: Array of the kfree_rcu() pointers
  * @next: Next bulk object in the block chain
- * @head_free_debug: For debug, when CONFIG_DEBUG_OBJECTS_RCU_HEAD is set
  */
 struct kfree_rcu_bulk_data {
 	unsigned long nr_records;
 	void *records[KFREE_BULK_MAX_ENTR];
 	struct kfree_rcu_bulk_data *next;
-	struct rcu_head *head_free_debug;
 };
 
 /**
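
For context, each bulk block occupies a single page, so dropping the head_free_debug pointer frees up one more record slot per block. The KFREE_BULK_MAX_ENTR bound referenced above is derived from the page size elsewhere in tree.c; roughly:

/* Entry budget for one bulk block: a page holds the header plus as
 * many object pointers as fit in the remainder (sketch of the
 * definition near this struct in tree.c). */
#define KFREE_BULK_MAX_ENTR \
	((PAGE_SIZE - sizeof(struct kfree_rcu_bulk_data)) / sizeof(void *))
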
@@ -3026,11 +3024,13 @@ struct kfree_rcu_cpu {
 static DEFINE_PER_CPU(struct kfree_rcu_cpu, krc);
 
 static __always_inline void
-debug_rcu_head_unqueue_bulk(struct rcu_head *head)
+debug_rcu_bhead_unqueue(struct kfree_rcu_bulk_data *bhead)
 {
 #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
-	for (; head; head = head->next)
-		debug_rcu_head_unqueue(head);
+	int i;
+
+	for (i = 0; i < bhead->nr_records; i++)
+		debug_rcu_head_unqueue((struct rcu_head *)(bhead->records[i]));
 #endif
 }
 
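
The new helper walks the records[] array directly, so debug-objects state is keyed on the object start addresses stored there rather than on a parallel chain threaded through each rcu_head. The cast to struct rcu_head * only satisfies the helper's signature; debug-objects just tracks the address. For reference, the queue/unqueue primitives it pairs with live in kernel/rcu/rcu.h and look roughly like this (a sketch, not the verbatim source):

#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
/* Returns nonzero if the object is already queued, i.e. a double free. */
static inline int debug_rcu_head_queue(struct rcu_head *head)
{
	int r1;

	r1 = debug_object_activate(head, &rcuhead_debug_descr);
	debug_object_active_state(head, &rcuhead_debug_descr,
				  STATE_RCU_HEAD_READY,
				  STATE_RCU_HEAD_QUEUED);
	return r1;
}

static inline void debug_rcu_head_unqueue(struct rcu_head *head)
{
	debug_object_active_state(head, &rcuhead_debug_descr,
				  STATE_RCU_HEAD_QUEUED,
				  STATE_RCU_HEAD_READY);
	debug_object_deactivate(head, &rcuhead_debug_descr);
}
#endif
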
@@ -3060,7 +3060,7 @@ static void kfree_rcu_work(struct work_struct *work)
 	for (; bhead; bhead = bnext) {
 		bnext = bhead->next;
-		debug_rcu_head_unqueue_bulk(bhead->head_free_debug);
+		debug_rcu_bhead_unqueue(bhead);
 
 		rcu_lock_acquire(&rcu_callback_map);
 		trace_rcu_invoke_kfree_bulk_callback(rcu_state.name,
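
Past this tracepoint (outside the hunk), the loop hands the whole records[] array to the slab layer in one batched call and then releases the page backing the block. A simplified sketch of that elided tail; the real code may recycle the page into a per-CPU cache rather than freeing it outright:

		/* Free every recorded object in one slab operation,
		 * then dispose of the block's page (simplified). */
		kfree_bulk(bhead->nr_records, bhead->records);
		rcu_lock_release(&rcu_callback_map);

		free_page((unsigned long)bhead);
		cond_resched_tasks_rcu_qs();
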
@@ -3082,14 +3082,15 @@ static void kfree_rcu_work(struct work_struct *work)
 	 */
 	for (; head; head = next) {
 		unsigned long offset = (unsigned long)head->func;
+		void *ptr = (void *)head - offset;
 
 		next = head->next;
-		debug_rcu_head_unqueue(head);
+		debug_rcu_head_unqueue((struct rcu_head *)ptr);
 		rcu_lock_acquire(&rcu_callback_map);
 		trace_rcu_invoke_kfree_callback(rcu_state.name, head, offset);
 
 		if (!WARN_ON_ONCE(!__is_kfree_rcu_offset(offset)))
-			kfree(ptr);
+			kfree(ptr);
 
 		rcu_lock_release(&rcu_callback_map);
 		cond_resched_tasks_rcu_qs();
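
The new ptr local makes the layout explicit: for kfree_rcu(), head->func is not a function pointer but the byte offset of the rcu_head inside its enclosing object, so subtracting it recovers the address kmalloc() originally returned. A minimal userspace model of the same arithmetic, using hypothetical shape-only types:

#include <assert.h>
#include <stddef.h>

/* Stand-ins for the kernel types (hypothetical, shape only). */
struct rcu_head {
	void *next;
	void *func;
};

struct foo {
	int payload;
	struct rcu_head rh;	/* embedded head, as kfree_rcu() callers do */
};

int main(void)
{
	struct foo f;
	unsigned long offset = offsetof(struct foo, rh);

	/* Same arithmetic as kfree_rcu_work(): the head's address minus
	 * the head's offset is the start of the enclosing allocation. */
	void *ptr = (char *)&f.rh - offset;

	assert(ptr == (void *)&f);
	return 0;
}
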
@@ -3228,18 +3229,11 @@ kfree_call_rcu_add_ptr_to_bulk(struct kfree_rcu_cpu *krcp,
 		/* Initialize the new block. */
 		bnode->nr_records = 0;
 		bnode->next = krcp->bhead;
-		bnode->head_free_debug = NULL;
 
 		/* Attach it to the head. */
 		krcp->bhead = bnode;
 	}
 
-#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
-	head->func = func;
-	head->next = krcp->bhead->head_free_debug;
-	krcp->bhead->head_free_debug = head;
-#endif
-
 	/* Finally insert. */
 	krcp->bhead->records[krcp->bhead->nr_records++] =
 		(void *) head - (unsigned long) func;
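
The record stored here is again the object's start address, computed from the head/func pair that kfree_rcu() passes down. On the caller side, the offset encoding originates in the kfree_rcu() macro in include/linux/rcupdate.h; a simplified sketch (the real macros also NULL-check the pointer and BUILD_BUG_ON() that the offset is small enough to be distinguishable from a genuine callback address):

#define kfree_rcu(ptr, rhf) \
	__kfree_rcu(&((ptr)->rhf), offsetof(typeof(*(ptr)), rhf))

#define __kfree_rcu(head, offset) \
	kfree_call_rcu(head, (rcu_callback_t)(unsigned long)(offset))
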
@@ -3263,14 +3257,17 @@ void kfree_call_rcu(struct rcu_head *head, rcu_callback_t func)
 {
 	unsigned long flags;
 	struct kfree_rcu_cpu *krcp;
+	void *ptr;
 
 	local_irq_save(flags);	// For safely calling this_cpu_ptr().
 	krcp = this_cpu_ptr(&krc);
 	if (krcp->initialized)
 		raw_spin_lock(&krcp->lock);
 
+	ptr = (void *)head - (unsigned long)func;
+
 	// Queue the object but don't yet schedule the batch.
-	if (debug_rcu_head_queue(head)) {
+	if (debug_rcu_head_queue(ptr)) {
 		// Probable double kfree_rcu(), just leak.
 		WARN_ONCE(1, "%s(): Double-freed call. rcu_head %p\n",
 			  __func__, head);
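
Queueing ptr instead of head means the debug-objects machinery now tracks the object's start address, the same address the bulk path records, so both the chained and the bulk reclaim paths unqueue consistently. A hypothetical caller that would trip the WARN_ONCE() above:

struct foo {
	int data;
	struct rcu_head rh;
};

static void example(struct foo *fp)
{
	kfree_rcu(fp, rh);	/* queues fp for deferred kfree() */
	kfree_rcu(fp, rh);	/* double free: with CONFIG_DEBUG_OBJECTS_RCU_HEAD,
				 * debug_rcu_head_queue() flags the duplicate and
				 * the object is deliberately leaked instead */
}
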