@@ -2900,22 +2900,21 @@ struct kvfree_rcu_bulk_data {
  * struct kfree_rcu_cpu_work - single batch of kfree_rcu() requests
  * @rcu_work: Let queue_rcu_work() invoke workqueue handler after grace period
  * @head_free: List of kfree_rcu() objects waiting for a grace period
- * @head_free_gp_snap: Snapshot of RCU state for objects placed to "@head_free"
  * @bulk_head_free: Bulk-List of kvfree_rcu() objects waiting for a grace period
  * @krcp: Pointer to @kfree_rcu_cpu structure
  */
 
 struct kfree_rcu_cpu_work {
-	struct work_struct rcu_work;
+	struct rcu_work rcu_work;
 	struct rcu_head *head_free;
-	unsigned long head_free_gp_snap;
 	struct list_head bulk_head_free[FREE_N_CHANNELS];
 	struct kfree_rcu_cpu *krcp;
 };
 
 /**
  * struct kfree_rcu_cpu - batch up kfree_rcu() requests for RCU grace period
  * @head: List of kfree_rcu() objects not yet waiting for a grace period
+ * @head_gp_snap: Snapshot of RCU state for objects placed to "@head"
  * @bulk_head: Bulk-List of kvfree_rcu() objects not yet waiting for a grace period
  * @krw_arr: Array of batches of kfree_rcu() objects waiting for a grace period
  * @lock: Synchronize access to this structure
@@ -2943,6 +2942,7 @@ struct kfree_rcu_cpu {
 	// Objects queued on a linked list
 	// through their rcu_head structures.
 	struct rcu_head *head;
+	unsigned long head_gp_snap;
 	atomic_t head_count;
 
 	// Objects queued on a bulk-list.
@@ -3111,10 +3111,9 @@ static void kfree_rcu_work(struct work_struct *work)
 	struct rcu_head *head;
 	struct kfree_rcu_cpu *krcp;
 	struct kfree_rcu_cpu_work *krwp;
-	unsigned long head_free_gp_snap;
 	int i;
 
-	krwp = container_of(work,
+	krwp = container_of(to_rcu_work(work),
 			    struct kfree_rcu_cpu_work, rcu_work);
 	krcp = krwp->krcp;
 
@@ -3126,26 +3125,11 @@ static void kfree_rcu_work(struct work_struct *work)
 	// Channel 3.
 	head = krwp->head_free;
 	krwp->head_free = NULL;
-	head_free_gp_snap = krwp->head_free_gp_snap;
 	raw_spin_unlock_irqrestore(&krcp->lock, flags);
 
 	// Handle the first two channels.
 	for (i = 0; i < FREE_N_CHANNELS; i++) {
 		// Start from the tail page, so a GP is likely passed for it.
-		list_for_each_entry_safe_reverse(bnode, n, &bulk_head[i], list) {
-			// Not yet ready? Bail out since we need one more GP.
-			if (!poll_state_synchronize_rcu(bnode->gp_snap))
-				break;
-
-			list_del_init(&bnode->list);
-			kvfree_rcu_bulk(krcp, bnode, i);
-		}
-
-		// Please note a request for one more extra GP can
-		// occur only once for all objects in this batch.
-		if (!list_empty(&bulk_head[i]))
-			synchronize_rcu();
-
 		list_for_each_entry_safe(bnode, n, &bulk_head[i], list)
 			kvfree_rcu_bulk(krcp, bnode, i);
 	}
@@ -3157,10 +3141,7 @@ static void kfree_rcu_work(struct work_struct *work)
 	 * queued on a linked list through their rcu_head structures.
 	 * This list is named "Channel 3".
 	 */
-	if (head) {
-		cond_synchronize_rcu(head_free_gp_snap);
-		kvfree_rcu_list(head);
-	}
+	kvfree_rcu_list(head);
 }
 
 static bool
@@ -3201,6 +3182,44 @@ schedule_delayed_monitor_work(struct kfree_rcu_cpu *krcp)
 	queue_delayed_work(system_wq, &krcp->monitor_work, delay);
 }
 
+static void
+kvfree_rcu_drain_ready(struct kfree_rcu_cpu *krcp)
+{
+	struct list_head bulk_ready[FREE_N_CHANNELS];
+	struct kvfree_rcu_bulk_data *bnode, *n;
+	struct rcu_head *head_ready = NULL;
+	unsigned long flags;
+	int i;
+
+	raw_spin_lock_irqsave(&krcp->lock, flags);
+	for (i = 0; i < FREE_N_CHANNELS; i++) {
+		INIT_LIST_HEAD(&bulk_ready[i]);
+
+		list_for_each_entry_safe_reverse(bnode, n, &krcp->bulk_head[i], list) {
+			if (!poll_state_synchronize_rcu(bnode->gp_snap))
+				break;
+
+			atomic_sub(bnode->nr_records, &krcp->bulk_count[i]);
+			list_move(&bnode->list, &bulk_ready[i]);
+		}
+	}
+
+	if (krcp->head && poll_state_synchronize_rcu(krcp->head_gp_snap)) {
+		head_ready = krcp->head;
+		atomic_set(&krcp->head_count, 0);
+		WRITE_ONCE(krcp->head, NULL);
+	}
+	raw_spin_unlock_irqrestore(&krcp->lock, flags);
+
+	for (i = 0; i < FREE_N_CHANNELS; i++) {
+		list_for_each_entry_safe(bnode, n, &bulk_ready[i], list)
+			kvfree_rcu_bulk(krcp, bnode, i);
+	}
+
+	if (head_ready)
+		kvfree_rcu_list(head_ready);
+}
+
 /*
  * This function is invoked after the KFREE_DRAIN_JIFFIES timeout.
  */
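For context (not part of the diff): kvfree_rcu_drain_ready() above is built on the polled grace-period API, where get_state_synchronize_rcu() returns a cookie and poll_state_synchronize_rcu() later reports whether a full grace period has elapsed since that cookie was taken. The stand-alone sketch below shows the same pattern with a hypothetical demo_node type and demo_queue list; it is illustrative only and omits the locking and per-channel bookkeeping the real code needs.

// Illustrative sketch only; demo_node/demo_queue are made up, the RCU calls are real.
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/rcupdate.h>

struct demo_node {
	struct list_head list;
	unsigned long gp_snap;	/* grace-period cookie taken at queueing time */
	void *ptr;		/* payload to free once a grace period has passed */
};

static LIST_HEAD(demo_queue);

/* Queue an object and remember the current grace-period state. */
static void demo_queue_free(struct demo_node *node, void *ptr)
{
	node->ptr = ptr;
	node->gp_snap = get_state_synchronize_rcu();
	list_add_tail(&node->list, &demo_queue);
}

/* Reclaim only objects whose grace period has already elapsed. */
static void demo_drain_ready(void)
{
	struct demo_node *node, *n;

	list_for_each_entry_safe(node, n, &demo_queue, list) {
		if (!poll_state_synchronize_rcu(node->gp_snap))
			break;	/* the rest were queued later; drain them next time */

		list_del(&node->list);
		kfree(node->ptr);
		kfree(node);
	}
}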
@@ -3211,6 +3230,9 @@ static void kfree_rcu_monitor(struct work_struct *work)
 	unsigned long flags;
 	int i, j;
 
+	// Drain ready for reclaim.
+	kvfree_rcu_drain_ready(krcp);
+
 	raw_spin_lock_irqsave(&krcp->lock, flags);
 
 	// Attempt to start a new batch.
@@ -3230,30 +3252,26 @@ static void kfree_rcu_monitor(struct work_struct *work)
 		// Channel 2 corresponds to vmalloc-pointer bulk path.
 		for (j = 0; j < FREE_N_CHANNELS; j++) {
 			if (list_empty(&krwp->bulk_head_free[j])) {
-				list_replace_init(&krcp->bulk_head[j], &krwp->bulk_head_free[j]);
 				atomic_set(&krcp->bulk_count[j], 0);
+				list_replace_init(&krcp->bulk_head[j],
+					&krwp->bulk_head_free[j]);
 			}
 		}
 
 		// Channel 3 corresponds to both SLAB and vmalloc
 		// objects queued on the linked list.
 		if (!krwp->head_free) {
 			krwp->head_free = krcp->head;
-			WRITE_ONCE(krcp->head, NULL);
 			atomic_set(&krcp->head_count, 0);
-
-			// Take a snapshot for this krwp. Please note no more
-			// any objects can be added to attached head_free channel
-			// therefore fixate a GP for it here.
-			krwp->head_free_gp_snap = get_state_synchronize_rcu();
+			WRITE_ONCE(krcp->head, NULL);
 		}
 
 		// One work is per one batch, so there are three
 		// "free channels", the batch can handle. It can
 		// be that the work is in the pending state when
 		// channels have been detached following by each
 		// other.
-		queue_work(system_wq, &krwp->rcu_work);
+		queue_rcu_work(system_wq, &krwp->rcu_work);
 	}
 }
 
@@ -3440,6 +3458,9 @@ void kvfree_call_rcu(struct rcu_head *head, void *ptr)
 		head->next = krcp->head;
 		WRITE_ONCE(krcp->head, head);
 		atomic_inc(&krcp->head_count);
+
+		// Take a snapshot for this krcp.
+		krcp->head_gp_snap = get_state_synchronize_rcu();
 		success = true;
 	}
 
@@ -4834,7 +4855,7 @@ static void __init kfree_rcu_batch_init(void)
 		struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu);
 
 		for (i = 0; i < KFREE_N_BATCHES; i++) {
-			INIT_WORK(&krcp->krw_arr[i].rcu_work, kfree_rcu_work);
+			INIT_RCU_WORK(&krcp->krw_arr[i].rcu_work, kfree_rcu_work);
 			krcp->krw_arr[i].krcp = krcp;
 
 			for (j = 0; j < FREE_N_CHANNELS; j++)
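For context (not part of the diff): the commit converts the batch work item from a plain work_struct into an rcu_work, so queue_rcu_work() defers the handler until after a full grace period and to_rcu_work() recovers the wrapper inside the handler. A minimal module-style sketch of that pattern, with made-up names demo_rw, demo_handler and demo_init, might look like this.

// Illustrative sketch only; the rcu_work helpers are real kernel APIs.
#include <linux/module.h>
#include <linux/workqueue.h>

static struct rcu_work demo_rw;

static void demo_handler(struct work_struct *work)
{
	struct rcu_work *rwork = to_rcu_work(work);

	/* Runs in process context, after a full RCU grace period has elapsed. */
	pr_info("rcu_work %p completed after a grace period\n", rwork);
}

static int __init demo_init(void)
{
	INIT_RCU_WORK(&demo_rw, demo_handler);

	/* queue_rcu_work() returns false if this rcu_work was already queued. */
	if (!queue_rcu_work(system_wq, &demo_rw))
		pr_info("demo rcu_work was already pending\n");

	return 0;
}

static void __exit demo_exit(void)
{
	/* Wait for both the grace period and the handler before unloading. */
	flush_rcu_work(&demo_rw);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");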