@@ -395,22 +395,9 @@ int smp_call_function_any(const struct cpumask *mask,
 }
 EXPORT_SYMBOL_GPL(smp_call_function_any);
 
-/**
- * smp_call_function_many(): Run a function on a set of other CPUs.
- * @mask: The set of cpus to run on (only runs on online subset).
- * @func: The function to run. This must be fast and non-blocking.
- * @info: An arbitrary pointer to pass to the function.
- * @wait: If true, wait (atomically) until function has completed
- *        on other CPUs.
- *
- * If @wait is true, then returns once @func has returned.
- *
- * You must not call this function with disabled interrupts or from a
- * hardware interrupt handler or from a bottom half handler. Preemption
- * must be disabled when calling this function.
- */
-void smp_call_function_many(const struct cpumask *mask,
-			    smp_call_func_t func, void *info, bool wait)
+static void smp_call_function_many_cond(const struct cpumask *mask,
+					smp_call_func_t func, void *info,
+					bool wait, smp_cond_func_t cond_func)
 {
 	struct call_function_data *cfd;
 	int cpu, next_cpu, this_cpu = smp_processor_id();
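The new trailing parameter is an smp_cond_func_t, a per-CPU predicate that lets this core helper skip CPUs. The typedef itself is not part of this hunk; as an assumption based on how it is used here, it presumably lives in include/linux/smp.h and looks roughly like this:

/* Presumed shape of the predicate type used above (assumption, not shown
 * in this diff): return true to send the IPI to @cpu, false to skip it. */
typedef bool (*smp_cond_func_t)(int cpu, void *info);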
@@ -448,7 +435,8 @@ void smp_call_function_many(const struct cpumask *mask,
 
 	/* Fastpath: do that cpu by itself. */
 	if (next_cpu >= nr_cpu_ids) {
-		smp_call_function_single(cpu, func, info, wait);
+		if (!cond_func || (cond_func && cond_func(cpu, info)))
+			smp_call_function_single(cpu, func, info, wait);
 		return;
 	}
 
@@ -465,6 +453,9 @@ void smp_call_function_many(const struct cpumask *mask,
 	for_each_cpu(cpu, cfd->cpumask) {
 		call_single_data_t *csd = per_cpu_ptr(cfd->csd, cpu);
 
+		if (cond_func && !cond_func(cpu, info))
+			continue;
+
 		csd_lock(csd);
 		if (wait)
 			csd->flags |= CSD_FLAG_SYNCHRONOUS;
@@ -486,6 +477,26 @@ void smp_call_function_many(const struct cpumask *mask,
 		}
 	}
 }
+
+/**
+ * smp_call_function_many(): Run a function on a set of other CPUs.
+ * @mask: The set of cpus to run on (only runs on online subset).
+ * @func: The function to run. This must be fast and non-blocking.
+ * @info: An arbitrary pointer to pass to the function.
+ * @wait: If true, wait (atomically) until function has completed
+ *        on other CPUs.
+ *
+ * If @wait is true, then returns once @func has returned.
+ *
+ * You must not call this function with disabled interrupts or from a
+ * hardware interrupt handler or from a bottom half handler. Preemption
+ * must be disabled when calling this function.
+ */
+void smp_call_function_many(const struct cpumask *mask,
+			    smp_call_func_t func, void *info, bool wait)
+{
+	smp_call_function_many_cond(mask, func, info, wait, NULL);
+}
 EXPORT_SYMBOL(smp_call_function_many);
 
 /**
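The exported smp_call_function_many() is now a thin wrapper that forwards to smp_call_function_many_cond() with a NULL predicate, so its documented rules are unchanged: preemption must be disabled and it must not be called from interrupt or bottom-half context. A minimal caller sketch under those rules (function names below are illustrative, not from the patch):

/* Hypothetical caller: run do_remote_work() on every other online CPU
 * and wait for completion. */
static void do_remote_work(void *info)
{
	/* must be fast and non-blocking: runs from IPI context */
}

static void kick_other_cpus(void)
{
	preempt_disable();	/* required by smp_call_function_many() */
	smp_call_function_many(cpu_online_mask, do_remote_work, NULL, true);
	preempt_enable();
}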
@@ -668,58 +679,34 @@ EXPORT_SYMBOL(on_each_cpu_mask);
  * @info: An arbitrary pointer to pass to both functions.
  * @wait: If true, wait (atomically) until function has
  *        completed on other CPUs.
- * @gfp_flags: GFP flags to use when allocating the cpumask
- *             used internally by the function.
- *
- * The function might sleep if the GFP flags indicates a non
- * atomic allocation is allowed.
  *
  * Preemption is disabled to protect against CPUs going offline but not online.
  * CPUs going online during the call will not be seen or sent an IPI.
  *
  * You must not call this function with disabled interrupts or
  * from a hardware interrupt handler or from a bottom half handler.
  */
-void on_each_cpu_cond_mask(bool (*cond_func)(int cpu, void *info),
-			smp_call_func_t func, void *info, bool wait,
-			gfp_t gfp_flags, const struct cpumask *mask)
+void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func,
+			   void *info, bool wait, const struct cpumask *mask)
 {
-	cpumask_var_t cpus;
-	int cpu, ret;
-
-	might_sleep_if(gfpflags_allow_blocking(gfp_flags));
-
-	if (likely(zalloc_cpumask_var(&cpus, (gfp_flags|__GFP_NOWARN)))) {
-		preempt_disable();
-		for_each_cpu(cpu, mask)
-			if (cond_func(cpu, info))
-				__cpumask_set_cpu(cpu, cpus);
-		on_each_cpu_mask(cpus, func, info, wait);
-		preempt_enable();
-		free_cpumask_var(cpus);
-	} else {
-		/*
-		 * No free cpumask, bother. No matter, we'll
-		 * just have to IPI them one by one.
-		 */
-		preempt_disable();
-		for_each_cpu(cpu, mask)
-			if (cond_func(cpu, info)) {
-				ret = smp_call_function_single(cpu, func,
-							       info, wait);
-				WARN_ON_ONCE(ret);
-			}
-		preempt_enable();
+	int cpu = get_cpu();
+
+	smp_call_function_many_cond(mask, func, info, wait, cond_func);
+	if (cpumask_test_cpu(cpu, mask) && cond_func(cpu, info)) {
+		unsigned long flags;
+
+		local_irq_save(flags);
+		func(info);
+		local_irq_restore(flags);
 	}
+	put_cpu();
 }
 EXPORT_SYMBOL(on_each_cpu_cond_mask);
 
-void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
-			smp_call_func_t func, void *info, bool wait,
-			gfp_t gfp_flags)
+void on_each_cpu_cond(smp_cond_func_t cond_func, smp_call_func_t func,
+		      void *info, bool wait)
 {
-	on_each_cpu_cond_mask(cond_func, func, info, wait, gfp_flags,
-			      cpu_online_mask);
+	on_each_cpu_cond_mask(cond_func, func, info, wait, cpu_online_mask);
 }
 EXPORT_SYMBOL(on_each_cpu_cond);
 
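With the internally allocated cpumask gone, on_each_cpu_cond() and on_each_cpu_cond_mask() no longer take GFP flags, so existing callers simply drop the last argument. A caller sketch against the new signature (all names below are illustrative, not from the patch):

/* Hypothetical caller of the new interface. */
static DEFINE_PER_CPU(bool, needs_work);	/* illustrative per-CPU flag */

static bool cpu_needs_work(int cpu, void *info)
{
	/* predicate; returning false skips the IPI to this CPU */
	return per_cpu(needs_work, cpu);
}

static void do_work(void *info)
{
	/* runs on every CPU for which cpu_needs_work() returned true */
	this_cpu_write(needs_work, false);
}

void run_where_needed(void)
{
	/* before this patch:
	 * on_each_cpu_cond(cpu_needs_work, do_work, NULL, true, GFP_KERNEL); */
	on_each_cpu_cond(cpu_needs_work, do_work, NULL, true);
}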