@@ -2921,7 +2921,8 @@ struct kfree_rcu_cpu_work {
  * @lock: Synchronize access to this structure
  * @monitor_work: Promote @head to @head_free after KFREE_DRAIN_JIFFIES
  * @initialized: The @rcu_work fields have been initialized
- * @count: Number of objects for which GP not started
+ * @head_count: Number of objects in rcu_head singular list
+ * @bulk_count: Number of objects in bulk-list
  * @bkvcache:
  *	A simple cache list that contains objects for reuse purpose.
  *	In order to save some per-cpu space the list is singular.
@@ -2939,13 +2940,19 @@ struct kfree_rcu_cpu_work {
  * the interactions with the slab allocators.
  */
 struct kfree_rcu_cpu {
+	// Objects queued on a linked list
+	// through their rcu_head structures.
 	struct rcu_head *head;
+	atomic_t head_count;
+
+	// Objects queued on a bulk-list.
 	struct list_head bulk_head[FREE_N_CHANNELS];
+	atomic_t bulk_count[FREE_N_CHANNELS];
+
 	struct kfree_rcu_cpu_work krw_arr[KFREE_N_BATCHES];
 	raw_spinlock_t lock;
 	struct delayed_work monitor_work;
 	bool initialized;
-	int count;

 	struct delayed_work page_cache_work;
 	atomic_t backoff_page_cache_fill;
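
Taken together with the kernel-doc hunk above, the structural change is that the single `int count` is split into one atomic counter for the rcu_head channel and one per bulk channel, and each enqueue path (see the `atomic_inc()` calls in the later hunks) bumps only the counter of the channel it actually used. Below is a minimal user-space sketch of that counting pattern with C11 atomics; `NR_CHANNELS`, `struct demo_krc` and the helper names are illustrative stand-ins, not kernel symbols.

/*
 * Minimal user-space sketch of the split-counter layout.
 * NR_CHANNELS, struct demo_krc and the helpers are illustrative
 * stand-ins for FREE_N_CHANNELS / struct kfree_rcu_cpu, not kernel code.
 */
#include <stdatomic.h>
#include <stdio.h>

#define NR_CHANNELS 2

struct demo_krc {
	atomic_int head_count;			/* objects on the rcu_head list */
	atomic_int bulk_count[NR_CHANNELS];	/* objects per bulk channel */
};

/* rcu_head path: one object queued through its embedded rcu_head. */
static void demo_queue_head(struct demo_krc *krc)
{
	atomic_fetch_add(&krc->head_count, 1);
}

/* Bulk path: one pointer queued on bulk channel @ch. */
static void demo_queue_bulk(struct demo_krc *krc, int ch)
{
	atomic_fetch_add(&krc->bulk_count[ch], 1);
}

int main(void)
{
	struct demo_krc krc = { 0 };

	demo_queue_head(&krc);
	demo_queue_bulk(&krc, 0);
	demo_queue_bulk(&krc, 1);

	printf("head=%d bulk[0]=%d bulk[1]=%d\n",
	       atomic_load(&krc.head_count),
	       atomic_load(&krc.bulk_count[0]),
	       atomic_load(&krc.bulk_count[1]));
	return 0;
}
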
@@ -3168,12 +3175,23 @@ need_offload_krc(struct kfree_rcu_cpu *krcp)
 	return !!READ_ONCE(krcp->head);
 }

+static int krc_count(struct kfree_rcu_cpu *krcp)
+{
+	int sum = atomic_read(&krcp->head_count);
+	int i;
+
+	for (i = 0; i < FREE_N_CHANNELS; i++)
+		sum += atomic_read(&krcp->bulk_count[i]);
+
+	return sum;
+}
+
 static void
 schedule_delayed_monitor_work(struct kfree_rcu_cpu *krcp)
 {
 	long delay, delay_left;

-	delay = READ_ONCE(krcp->count) >= KVFREE_BULK_MAX_ENTR ? 1:KFREE_DRAIN_JIFFIES;
+	delay = krc_count(krcp) >= KVFREE_BULK_MAX_ENTR ? 1:KFREE_DRAIN_JIFFIES;
 	if (delayed_work_pending(&krcp->monitor_work)) {
 		delay_left = krcp->monitor_work.timer.expires - jiffies;
 		if (delay < delay_left)
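
krc_count() is a lock-free sum over counters that other paths may be updating concurrently, so the result is only a momentary snapshot; that is enough for schedule_delayed_monitor_work(), which merely decides between draining almost immediately and waiting the normal interval. The sketch below restates that decision in stand-alone form; DEMO_THRESHOLD and DEMO_DRAIN_DELAY are assumed placeholder values standing in for KVFREE_BULK_MAX_ENTR and KFREE_DRAIN_JIFFIES, and struct demo_krc mirrors the sketch shown after the structure hunk.

/*
 * Stand-alone sketch of "sum the counters, pick a drain delay".
 * DEMO_THRESHOLD / DEMO_DRAIN_DELAY and struct demo_krc are illustrative
 * placeholders, not the kernel's KVFREE_BULK_MAX_ENTR / KFREE_DRAIN_JIFFIES.
 */
#include <stdatomic.h>
#include <stdio.h>

#define NR_CHANNELS	 2
#define DEMO_THRESHOLD	 64	/* "backlog is large" cut-off */
#define DEMO_DRAIN_DELAY 50	/* normal delay before draining */

struct demo_krc {
	atomic_int head_count;
	atomic_int bulk_count[NR_CHANNELS];
};

/* Same shape as krc_count(): an approximate, lock-free total. */
static int demo_count(struct demo_krc *krc)
{
	int sum = atomic_load(&krc->head_count);

	for (int i = 0; i < NR_CHANNELS; i++)
		sum += atomic_load(&krc->bulk_count[i]);

	return sum;
}

/* Large backlog => drain on the next tick, otherwise wait the usual delay. */
static long demo_pick_delay(struct demo_krc *krc)
{
	return demo_count(krc) >= DEMO_THRESHOLD ? 1 : DEMO_DRAIN_DELAY;
}

int main(void)
{
	struct demo_krc krc = { 0 };

	atomic_store(&krc.bulk_count[0], 100);		/* pretend a burst was queued */
	printf("delay=%ld\n", demo_pick_delay(&krc));	/* prints delay=1 */
	return 0;
}
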
@@ -3211,24 +3229,25 @@ static void kfree_rcu_monitor(struct work_struct *work)
 			// Channel 1 corresponds to the SLAB-pointer bulk path.
 			// Channel 2 corresponds to vmalloc-pointer bulk path.
 			for (j = 0; j < FREE_N_CHANNELS; j++) {
-				if (list_empty(&krwp->bulk_head_free[j]))
+				if (list_empty(&krwp->bulk_head_free[j])) {
 					list_replace_init(&krcp->bulk_head[j], &krwp->bulk_head_free[j]);
+					atomic_set(&krcp->bulk_count[j], 0);
+				}
 			}

 			// Channel 3 corresponds to both SLAB and vmalloc
 			// objects queued on the linked list.
 			if (!krwp->head_free) {
 				krwp->head_free = krcp->head;
 				WRITE_ONCE(krcp->head, NULL);
+				atomic_set(&krcp->head_count, 0);

 				// Take a snapshot for this krwp. Please note no more
 				// any objects can be added to attached head_free channel
 				// therefore fixate a GP for it here.
 				krwp->head_free_gp_snap = get_state_synchronize_rcu();
 			}

-			WRITE_ONCE(krcp->count, 0);
-
 			// One work is per one batch, so there are three
 			// "free channels", the batch can handle. It can
 			// be that the work is in the pending state when
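
In kfree_rcu_monitor() a channel is only handed to a batch when the matching krwp slot is free, and its counter is reset to zero in the same locked section in which the list (or head pointer) is detached, so the counters always describe objects that have not yet been scheduled for freeing. The following is a rough user-space sketch of that detach-and-reset step under stated assumptions: a pthread mutex stands in for krcp->lock, a boolean stands in for the busy krwp slot, and a bare counter stands in for the detached list.

/*
 * Rough sketch of "detach a channel and reset its counter" under a lock.
 * pthread_mutex_t stands in for krcp->lock; the returned value stands in
 * for the list that would be spliced onto the batch. Illustrative only.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct demo_channel {
	pthread_mutex_t lock;
	atomic_int count;	/* objects queued but not yet handed to a batch */
	bool batch_busy;	/* the krwp slot for this channel is in flight */
};

/* Returns the number of objects handed over, or 0 if the batch is busy. */
static int demo_detach_channel(struct demo_channel *ch)
{
	int detached = 0;

	pthread_mutex_lock(&ch->lock);
	if (!ch->batch_busy) {
		/* The list would be spliced here; the counter starts over at 0. */
		detached = atomic_exchange(&ch->count, 0);
		ch->batch_busy = true;
	}
	pthread_mutex_unlock(&ch->lock);

	return detached;
}

int main(void)
{
	struct demo_channel ch = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
	};

	atomic_store(&ch.count, 3);
	printf("first detach: %d\n", demo_detach_channel(&ch));	/* 3 */
	atomic_store(&ch.count, 2);
	printf("second detach: %d\n", demo_detach_channel(&ch));	/* 0, batch busy */
	return 0;
}
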
@@ -3365,6 +3384,8 @@ add_ptr_to_bulk_krc_lock(struct kfree_rcu_cpu **krcp,
 	// Finally insert and update the GP for this page.
 	bnode->records[bnode->nr_records++] = ptr;
 	bnode->gp_snap = get_state_synchronize_rcu();
+	atomic_inc(&(*krcp)->bulk_count[idx]);
+
 	return true;
 }

@@ -3418,11 +3439,10 @@ void kvfree_call_rcu(struct rcu_head *head, void *ptr)
 		head->func = ptr;
 		head->next = krcp->head;
 		WRITE_ONCE(krcp->head, head);
+		atomic_inc(&krcp->head_count);
 		success = true;
 	}

-	WRITE_ONCE(krcp->count, krcp->count + 1);
-
 	// Set timer to drain after KFREE_DRAIN_JIFFIES.
 	if (rcu_scheduler_active == RCU_SCHEDULER_RUNNING)
 		schedule_delayed_monitor_work(krcp);
@@ -3453,7 +3473,7 @@ kfree_rcu_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
 	for_each_possible_cpu(cpu) {
 		struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu);

-		count += READ_ONCE(krcp->count);
+		count += krc_count(krcp);
 		count += READ_ONCE(krcp->nr_bkv_objs);
 		atomic_set(&krcp->backoff_page_cache_fill, 1);
 	}
@@ -3470,7 +3490,7 @@ kfree_rcu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
 		int count;
 		struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu);

-		count = krcp->count;
+		count = krc_count(krcp);
 		count += drain_page_cache(krcp);
 		kfree_rcu_monitor(&krcp->monitor_work.work);
