@@ -105,6 +105,19 @@ static char *scale_type = "rcu";
105
105
module_param (scale_type , charp , 0444 );
106
106
MODULE_PARM_DESC (scale_type , "Type of RCU to scalability-test (rcu, srcu, ...)" );
107
107
108
// Structure definitions for custom fixed-per-task allocator.
struct writer_mblock {
	struct rcu_head wmb_rh;		// Passed to cur_ops->async() for async grace periods.
	struct llist_node wmb_node;	// Linkage onto the ws_lhg/ws_lhp freelists.
	struct writer_mblock *wmb_wfl;	// NOTE(review): name suggests a backpointer to the
					// owning writer_freelist, but the declared type is
					// writer_mblock * here — verify against the users,
					// which assign a writer_freelist * into it.
};

struct writer_freelist {
	struct llist_head ws_lhg;	// Global freelist: callbacks push freed blocks here.
	struct llist_head ____cacheline_internodealigned_in_smp ws_lhp;	// Writer-private
					// freelist, accessed only by its rcu_scale_writer
					// task (separate cacheline to avoid false sharing).
	struct writer_mblock *ws_mblocks;	// Backing array, gp_async_max entries.
};
108
121
static int nrealreaders ;
109
122
static int nrealwriters ;
110
123
static struct task_struct * * writer_tasks ;
@@ -113,6 +126,7 @@ static struct task_struct *shutdown_task;
113
126
114
127
static u64 * * writer_durations ;
115
128
static bool * writer_done ;
129
+ static struct writer_freelist * writer_freelists ;
116
130
static int * writer_n_durations ;
117
131
static atomic_t n_rcu_scale_reader_started ;
118
132
static atomic_t n_rcu_scale_writer_started ;
@@ -463,13 +477,52 @@ rcu_scale_reader(void *arg)
463
477
return 0 ;
464
478
}
465
479
480
+ /*
481
+ * Allocate a writer_mblock structure for the specified rcu_scale_writer
482
+ * task.
483
+ */
484
+ static struct writer_mblock * rcu_scale_alloc (long me )
485
+ {
486
+ struct llist_node * llnp ;
487
+ struct writer_freelist * wflp ;
488
+ struct writer_mblock * wmbp ;
489
+
490
+ if (WARN_ON_ONCE (!writer_freelists ))
491
+ return NULL ;
492
+ wflp = & writer_freelists [me ];
493
+ if (llist_empty (& wflp -> ws_lhp )) {
494
+ // ->ws_lhp is private to its rcu_scale_writer task.
495
+ wmbp = container_of (llist_del_all (& wflp -> ws_lhg ), struct writer_mblock , wmb_node );
496
+ wflp -> ws_lhp .first = & wmbp -> wmb_node ;
497
+ }
498
+ llnp = llist_del_first (& wflp -> ws_lhp );
499
+ if (!llnp )
500
+ return NULL ;
501
+ return container_of (llnp , struct writer_mblock , wmb_node );
502
+ }
503
+
504
+ /*
505
+ * Free a writer_mblock structure to its rcu_scale_writer task.
506
+ */
507
+ static void rcu_scale_free (struct writer_mblock * wmbp )
508
+ {
509
+ struct writer_freelist * wflp ;
510
+
511
+ if (!wmbp )
512
+ return ;
513
+ wflp = wmbp -> wmb_wfl ;
514
+ llist_add (& wmbp -> wmb_node , & wflp -> ws_lhg );
515
+ }
516
+
466
517
/*
467
518
* Callback function for asynchronous grace periods from rcu_scale_writer().
468
519
*/
469
520
static void rcu_scale_async_cb (struct rcu_head * rhp )
470
521
{
522
+ struct writer_mblock * wmbp = container_of (rhp , struct writer_mblock , wmb_rh );
523
+
471
524
atomic_dec (this_cpu_ptr (& n_async_inflight ));
472
- kfree ( rhp );
525
+ rcu_scale_free ( wmbp );
473
526
}
474
527
475
528
/*
@@ -482,13 +535,13 @@ rcu_scale_writer(void *arg)
482
535
int i_max ;
483
536
unsigned long jdone ;
484
537
long me = (long )arg ;
485
- struct rcu_head * rhp = NULL ;
486
538
bool selfreport = false;
487
539
bool started = false, done = false, alldone = false;
488
540
u64 t ;
489
541
DEFINE_TORTURE_RANDOM (tr );
490
542
u64 * wdp ;
491
543
u64 * wdpp = writer_durations [me ];
544
+ struct writer_mblock * wmbp = NULL ;
492
545
493
546
VERBOSE_SCALEOUT_STRING ("rcu_scale_writer task started" );
494
547
WARN_ON (!wdpp );
@@ -529,17 +582,18 @@ rcu_scale_writer(void *arg)
529
582
wdp = & wdpp [i ];
530
583
* wdp = ktime_get_mono_fast_ns ();
531
584
if (gp_async && !WARN_ON_ONCE (!cur_ops -> async )) {
532
- if (!rhp )
533
- rhp = kmalloc ( sizeof ( * rhp ), GFP_KERNEL );
534
- if (rhp && atomic_read (this_cpu_ptr (& n_async_inflight )) < gp_async_max ) {
585
+ if (!wmbp )
586
+ wmbp = rcu_scale_alloc ( me );
587
+ if (wmbp && atomic_read (this_cpu_ptr (& n_async_inflight )) < gp_async_max ) {
535
588
atomic_inc (this_cpu_ptr (& n_async_inflight ));
536
- cur_ops -> async (rhp , rcu_scale_async_cb );
537
- rhp = NULL ;
589
+ cur_ops -> async (& wmbp -> wmb_rh , rcu_scale_async_cb );
590
+ wmbp = NULL ;
538
591
gp_succeeded = true;
539
592
} else if (!kthread_should_stop ()) {
540
593
cur_ops -> gp_barrier ();
541
594
} else {
542
- kfree (rhp ); /* Because we are stopping. */
595
+ rcu_scale_free (wmbp ); /* Because we are stopping. */
596
+ wmbp = NULL ;
543
597
}
544
598
} else if (gp_exp ) {
545
599
cur_ops -> exp_sync ();
@@ -607,6 +661,7 @@ rcu_scale_writer(void *arg)
607
661
rcu_scale_wait_shutdown ();
608
662
} while (!torture_must_stop ());
609
663
if (gp_async && cur_ops -> async ) {
664
+ rcu_scale_free (wmbp );
610
665
cur_ops -> gp_barrier ();
611
666
}
612
667
writer_n_durations [me ] = i_max + 1 ;
@@ -970,12 +1025,30 @@ rcu_scale_cleanup(void)
970
1025
schedule_timeout_uninterruptible (1 );
971
1026
}
972
1027
kfree (writer_durations [i ]);
1028
+ if (writer_freelists ) {
1029
+ int ctr = 0 ;
1030
+ struct llist_node * llnp ;
1031
+ struct writer_freelist * wflp = & writer_freelists [i ];
1032
+
1033
+ if (wflp -> ws_mblocks ) {
1034
+ llist_for_each (llnp , wflp -> ws_lhg .first )
1035
+ ctr ++ ;
1036
+ llist_for_each (llnp , wflp -> ws_lhp .first )
1037
+ ctr ++ ;
1038
+ WARN_ONCE (ctr != gp_async_max ,
1039
+ "%s: ctr = %d gp_async_max = %d\n" ,
1040
+ __func__ , ctr , gp_async_max );
1041
+ kfree (wflp -> ws_mblocks );
1042
+ }
1043
+ }
973
1044
}
974
1045
kfree (writer_tasks );
975
1046
kfree (writer_durations );
976
1047
kfree (writer_n_durations );
977
1048
kfree (writer_done );
978
1049
writer_done = NULL ;
1050
+ kfree (writer_freelists );
1051
+ writer_freelists = NULL ;
979
1052
}
980
1053
981
1054
/* Do torture-type-specific cleanup operations. */
@@ -1002,8 +1075,9 @@ rcu_scale_shutdown(void *arg)
1002
1075
static int __init
1003
1076
rcu_scale_init (void )
1004
1077
{
1005
- long i ;
1006
1078
int firsterr = 0 ;
1079
+ long i ;
1080
+ long j ;
1007
1081
static struct rcu_scale_ops * scale_ops [] = {
1008
1082
& rcu_ops , & srcu_ops , & srcud_ops , TASKS_OPS TASKS_RUDE_OPS TASKS_TRACING_OPS
1009
1083
};
@@ -1074,7 +1148,18 @@ rcu_scale_init(void)
1074
1148
writer_durations = kcalloc (nrealwriters , sizeof (* writer_durations ), GFP_KERNEL );
1075
1149
writer_n_durations = kcalloc (nrealwriters , sizeof (* writer_n_durations ), GFP_KERNEL );
1076
1150
writer_done = kcalloc (nrealwriters , sizeof (writer_done [0 ]), GFP_KERNEL );
1077
- if (!writer_tasks || !writer_durations || !writer_n_durations || !writer_done ) {
1151
+ if (gp_async ) {
1152
+ if (gp_async_max <= 0 ) {
1153
+ pr_warn ("%s: gp_async_max = %d must be greater than zero.\n" ,
1154
+ __func__ , gp_async_max );
1155
+ WARN_ON_ONCE (IS_BUILTIN (CONFIG_RCU_TORTURE_TEST ));
1156
+ firsterr = - EINVAL ;
1157
+ goto unwind ;
1158
+ }
1159
+ writer_freelists = kcalloc (nrealwriters , sizeof (writer_freelists [0 ]), GFP_KERNEL );
1160
+ }
1161
+ if (!writer_tasks || !writer_durations || !writer_n_durations || !writer_done ||
1162
+ (gp_async && !writer_freelists )) {
1078
1163
SCALEOUT_ERRSTRING ("out of memory" );
1079
1164
firsterr = - ENOMEM ;
1080
1165
goto unwind ;
@@ -1087,6 +1172,24 @@ rcu_scale_init(void)
1087
1172
firsterr = - ENOMEM ;
1088
1173
goto unwind ;
1089
1174
}
1175
+ if (writer_freelists ) {
1176
+ struct writer_freelist * wflp = & writer_freelists [i ];
1177
+
1178
+ init_llist_head (& wflp -> ws_lhg );
1179
+ init_llist_head (& wflp -> ws_lhp );
1180
+ wflp -> ws_mblocks = kcalloc (gp_async_max , sizeof (wflp -> ws_mblocks [0 ]),
1181
+ GFP_KERNEL );
1182
+ if (!wflp -> ws_mblocks ) {
1183
+ firsterr = - ENOMEM ;
1184
+ goto unwind ;
1185
+ }
1186
+ for (j = 0 ; j < gp_async_max ; j ++ ) {
1187
+ struct writer_mblock * wmbp = & wflp -> ws_mblocks [j ];
1188
+
1189
+ wmbp -> wmb_wfl = wflp ;
1190
+ llist_add (& wmbp -> wmb_node , & wflp -> ws_lhp );
1191
+ }
1192
+ }
1090
1193
firsterr = torture_create_kthread (rcu_scale_writer , (void * )i ,
1091
1194
writer_tasks [i ]);
1092
1195
if (torture_init_error (firsterr ))
0 commit comments