@@ -96,7 +96,7 @@ static DEFINE_PER_CPU_ALIGNED(struct call_function_data, cfd_data);
 
 static DEFINE_PER_CPU_SHARED_ALIGNED(struct llist_head, call_single_queue);
 
-static void flush_smp_call_function_queue(bool warn_cpu_offline);
+static void __flush_smp_call_function_queue(bool warn_cpu_offline);
 
 int smpcfd_prepare_cpu(unsigned int cpu)
 {
@@ -141,7 +141,7 @@ int smpcfd_dying_cpu(unsigned int cpu)
 	 * ensure that the outgoing CPU doesn't go offline with work
 	 * still pending.
 	 */
-	flush_smp_call_function_queue(false);
+	__flush_smp_call_function_queue(false);
 	irq_work_run();
 	return 0;
 }
@@ -541,11 +541,11 @@ void generic_smp_call_function_single_interrupt(void)
 {
 	cfd_seq_store(this_cpu_ptr(&cfd_seq_local)->gotipi, CFD_SEQ_NOCPU,
 		      smp_processor_id(), CFD_SEQ_GOTIPI);
-	flush_smp_call_function_queue(true);
+	__flush_smp_call_function_queue(true);
 }
 
 /**
- * flush_smp_call_function_queue - Flush pending smp-call-function callbacks
+ * __flush_smp_call_function_queue - Flush pending smp-call-function callbacks
  *
  * @warn_cpu_offline: If set to 'true', warn if callbacks were queued on an
  *		      offline CPU. Skip this check if set to 'false'.
@@ -558,7 +558,7 @@ void generic_smp_call_function_single_interrupt(void)
  * Loop through the call_single_queue and run all the queued callbacks.
  * Must be called with interrupts disabled.
  */
-static void flush_smp_call_function_queue(bool warn_cpu_offline)
+static void __flush_smp_call_function_queue(bool warn_cpu_offline)
 {
 	call_single_data_t *csd, *csd_next;
 	struct llist_node *entry, *prev;
@@ -681,7 +681,20 @@ static void flush_smp_call_function_queue(bool warn_cpu_offline)
 		      smp_processor_id(), CFD_SEQ_HDLEND);
 }
 
-void flush_smp_call_function_from_idle(void)
+
+/**
+ * flush_smp_call_function_queue - Flush pending smp-call-function callbacks
+ *				   from task context (idle, migration thread)
+ *
+ * When TIF_POLLING_NRFLAG is supported and a CPU is in idle and has it
+ * set, then remote CPUs can avoid sending IPIs and wake the idle CPU by
+ * setting TIF_NEED_RESCHED. The idle task on the woken up CPU has to
+ * handle queued SMP function calls before scheduling.
+ *
+ * The migration thread has to ensure that an eventually pending wakeup has
+ * been handled before it migrates a task.
+ */
+void flush_smp_call_function_queue(void)
 {
 	unsigned long flags;
 
@@ -691,7 +704,7 @@ void flush_smp_call_function_from_idle(void)
 	cfd_seq_store(this_cpu_ptr(&cfd_seq_local)->idle, CFD_SEQ_NOCPU,
 		      smp_processor_id(), CFD_SEQ_IDLE);
 	local_irq_save(flags);
-	flush_smp_call_function_queue(true);
+	__flush_smp_call_function_queue(true);
 	if (local_softirq_pending())
 		do_softirq();
 
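
For orientation, the call structure after this rename looks roughly as follows. This is a hedged sketch pieced together from the hunks above, not a quote of kernel/smp.c: the worker's body and the cfd_seq_store() tracing calls are elided, and the local_irq_restore() pairing in the task-context entry point is an assumption based on the visible local_irq_save().

/*
 * Sketch only: reconstructed from the diff hunks above. Elided bodies
 * and the local_irq_restore() pairing are assumptions, not verbatim
 * kernel source.
 */

/* Internal worker: callers must already have interrupts disabled. */
static void __flush_smp_call_function_queue(bool warn_cpu_offline);

/* IPI entry point: runs in hard interrupt context, IRQs already off,
 * so it can call the worker directly. */
void generic_smp_call_function_single_interrupt(void)
{
	__flush_smp_call_function_queue(true);
}

/* Task-context entry point (idle task, migration thread): has to
 * establish the interrupts-off context around the worker itself. */
void flush_smp_call_function_queue(void)
{
	unsigned long flags;

	local_irq_save(flags);
	__flush_smp_call_function_queue(true);
	if (local_softirq_pending())
		do_softirq();
	local_irq_restore(flags);	/* assumed pairing */
}

The double-underscore prefix follows the usual kernel convention: __foo() is the context-sensitive inner helper, while foo() is the entry point that establishes the required context. That frees the plain flush_smp_call_function_queue() name for the new task-context API used by idle and the migration thread.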