@@ -97,6 +97,7 @@ struct scf_statistics {
9797static struct scf_statistics * scf_stats_p ;
9898static struct task_struct * scf_torture_stats_task ;
9999static DEFINE_PER_CPU (long long, scf_invoked_count ) ;
100+ static DEFINE_PER_CPU (struct llist_head , scf_free_pool ) ;
100101
101102// Data for random primitive selection
102103#define SCF_PRIM_RESCHED 0
@@ -133,6 +134,7 @@ struct scf_check {
133134 bool scfc_wait ;
134135 bool scfc_rpc ;
135136 struct completion scfc_completion ;
137+ struct llist_node scf_node ;
136138};
137139
138140// Use to wait for all threads to start.
@@ -148,6 +150,33 @@ static DEFINE_TORTURE_RANDOM_PERCPU(scf_torture_rand);
148150
149151extern void resched_cpu (int cpu ); // An alternative IPI vector.
150152
153+ /*
153+  * Defer freeing of a scf_check structure: push it onto a per-CPU
153+  * lock-less free list instead of calling kfree() directly.  This is
153+  * safe to call from IPI/interrupt context (llist_add() is lock-free),
153+  * where a direct kfree() from the smp_call_function() handler would
153+  * be undesirable.  Entries are later drained by scf_cleanup_free_list().
153+  */
153+ static void scf_add_to_free_list (struct scf_check * scfcp )
154+ {
155+ struct llist_head * pool ;
156+ unsigned int cpu ;
157+ 
158+ if (!scfcp )
159+ return;  /* tolerate NULL so callers need no guard */
160+ /*
161+  * NOTE(review): pool index is taken modulo nthreads, presumably so
162+  * that entries only land on pools owned by CPUs that run an invoker
163+  * kthread (which drains its own pool each loop) — confirm against
164+  * the nthreads definition elsewhere in this file.
165+  */
160+ cpu = raw_smp_processor_id () % nthreads ;
161+ pool = & per_cpu (scf_free_pool , cpu );
162+ llist_add (& scfcp -> scf_node , pool );
163+ }
164+
165+ /*
165+  * Drain the per-CPU free pool for @cpu and kfree() every scf_check
165+  * queued on it by scf_add_to_free_list().  llist_del_all() atomically
165+  * detaches the whole list, so this walks a private snapshot and needs
165+  * no locking.  Called from process context (invoker loop / cleanup).
165+  */
166+ static void scf_cleanup_free_list (unsigned int cpu )
167+ {
168+ struct llist_head * pool ;
169+ struct llist_node * node ;
170+ struct scf_check * scfcp ;
171+ 
172+ pool = & per_cpu (scf_free_pool , cpu );
173+ node = llist_del_all (pool );
174+ while (node ) {
175+ scfcp = llist_entry (node , struct scf_check , scf_node );
176+ node = node -> next ;  /* read next before freeing: scf_node lives inside scfcp */
177+ kfree (scfcp );
178+ }
179+ }
179+
151180// Print torture statistics. Caller must ensure serialization.
152181static void scf_torture_stats_print (void )
153182{
@@ -296,7 +325,7 @@ static void scf_handler(void *scfc_in)
296325 if (scfcp -> scfc_rpc )
297326 complete (& scfcp -> scfc_completion );
298327 } else {
299- kfree (scfcp );
328+ scf_add_to_free_list (scfcp );
300329 }
301330}
302331
@@ -320,10 +349,6 @@ static void scftorture_invoke_one(struct scf_statistics *scfp, struct torture_ra
320349 struct scf_check * scfcp = NULL ;
321350 struct scf_selector * scfsp = scf_sel_rand (trsp );
322351
323- if (use_cpus_read_lock )
324- cpus_read_lock ();
325- else
326- preempt_disable ();
327352 if (scfsp -> scfs_prim == SCF_PRIM_SINGLE || scfsp -> scfs_wait ) {
328353 scfcp = kmalloc (sizeof (* scfcp ), GFP_ATOMIC );
329354 if (!scfcp ) {
@@ -337,6 +362,10 @@ static void scftorture_invoke_one(struct scf_statistics *scfp, struct torture_ra
337362 scfcp -> scfc_rpc = false;
338363 }
339364 }
365+ if (use_cpus_read_lock )
366+ cpus_read_lock ();
367+ else
368+ preempt_disable ();
340369 switch (scfsp -> scfs_prim ) {
341370 case SCF_PRIM_RESCHED :
342371 if (IS_BUILTIN (CONFIG_SCF_TORTURE_TEST )) {
@@ -363,7 +392,7 @@ static void scftorture_invoke_one(struct scf_statistics *scfp, struct torture_ra
363392 scfp -> n_single_wait_ofl ++ ;
364393 else
365394 scfp -> n_single_ofl ++ ;
366- kfree (scfcp );
395+ scf_add_to_free_list (scfcp );
367396 scfcp = NULL ;
368397 }
369398 break ;
@@ -391,7 +420,7 @@ static void scftorture_invoke_one(struct scf_statistics *scfp, struct torture_ra
391420 preempt_disable ();
392421 } else {
393422 scfp -> n_single_rpc_ofl ++ ;
394- kfree (scfcp );
423+ scf_add_to_free_list (scfcp );
395424 scfcp = NULL ;
396425 }
397426 break ;
@@ -428,7 +457,7 @@ static void scftorture_invoke_one(struct scf_statistics *scfp, struct torture_ra
428457 pr_warn ("%s: Memory-ordering failure, scfs_prim: %d.\n" , __func__ , scfsp -> scfs_prim );
429458 atomic_inc (& n_mb_out_errs ); // Leak rather than trash!
430459 } else {
431- kfree (scfcp );
460+ scf_add_to_free_list (scfcp );
432461 }
433462 barrier (); // Prevent race-reduction compiler optimizations.
434463 }
@@ -463,7 +492,7 @@ static int scftorture_invoker(void *arg)
463492
464493 // Make sure that the CPU is affinitized appropriately during testing.
465494 curcpu = raw_smp_processor_id ();
466- WARN_ONCE (curcpu != scfp -> cpu % nr_cpu_ids ,
495+ WARN_ONCE (curcpu != cpu ,
467496 "%s: Wanted CPU %d, running on %d, nr_cpu_ids = %d\n" ,
468497 __func__ , scfp -> cpu , curcpu , nr_cpu_ids );
469498
@@ -479,6 +508,8 @@ static int scftorture_invoker(void *arg)
479508 VERBOSE_SCFTORTOUT ("scftorture_invoker %d started" , scfp -> cpu );
480509
481510 do {
511+ scf_cleanup_free_list (cpu );
512+
482513 scftorture_invoke_one (scfp , & rand );
483514 while (cpu_is_offline (cpu ) && !torture_must_stop ()) {
484515 schedule_timeout_interruptible (HZ / 5 );
@@ -523,12 +554,15 @@ static void scf_torture_cleanup(void)
523554 torture_stop_kthread ("scftorture_invoker" , scf_stats_p [i ].task );
524555 else
525556 goto end ;
526- smp_call_function (scf_cleanup_handler , NULL , 0 );
557+ smp_call_function (scf_cleanup_handler , NULL , 1 );
527558 torture_stop_kthread (scf_torture_stats , scf_torture_stats_task );
528559 scf_torture_stats_print (); // -After- the stats thread is stopped!
529560 kfree (scf_stats_p ); // -After- the last stats print has completed!
530561 scf_stats_p = NULL ;
531562
563+ for (i = 0 ; i < nr_cpu_ids ; i ++ )
564+ scf_cleanup_free_list (i );
565+
532566 if (atomic_read (& n_errs ) || atomic_read (& n_mb_in_errs ) || atomic_read (& n_mb_out_errs ))
533567 scftorture_print_module_parms ("End of test: FAILURE" );
534568 else if (torture_onoff_failures ())
0 commit comments