@@ -56,61 +56,70 @@ void __weak arch_irq_work_raise(void)
 	 */
 }
 
-/*
- * Enqueue the irq_work @work on @cpu unless it's already pending
- * somewhere.
- *
- * Can be re-enqueued while the callback is still in progress.
- */
-bool irq_work_queue_on(struct irq_work *work, int cpu)
+/* Enqueue on current CPU, work must already be claimed and preempt disabled */
+static void __irq_work_queue_local(struct irq_work *work)
 {
-	/* All work should have been flushed before going offline */
-	WARN_ON_ONCE(cpu_is_offline(cpu));
-
-#ifdef CONFIG_SMP
-
-	/* Arch remote IPI send/receive backend aren't NMI safe */
-	WARN_ON_ONCE(in_nmi());
+	/* If the work is "lazy", handle it from next tick if any */
+	if (work->flags & IRQ_WORK_LAZY) {
+		if (llist_add(&work->llnode, this_cpu_ptr(&lazy_list)) &&
+		    tick_nohz_tick_stopped())
+			arch_irq_work_raise();
+	} else {
+		if (llist_add(&work->llnode, this_cpu_ptr(&raised_list)))
+			arch_irq_work_raise();
+	}
+}
 
+/* Enqueue the irq work @work on the current CPU */
+bool irq_work_queue(struct irq_work *work)
+{
 	/* Only queue if not already pending */
 	if (!irq_work_claim(work))
 		return false;
 
-	if (llist_add(&work->llnode, &per_cpu(raised_list, cpu)))
-		arch_send_call_function_single_ipi(cpu);
-
-#else /* #ifdef CONFIG_SMP */
-	irq_work_queue(work);
-#endif /* #else #ifdef CONFIG_SMP */
+	/* Queue the entry and raise the IPI if needed. */
+	preempt_disable();
+	__irq_work_queue_local(work);
+	preempt_enable();
 
 	return true;
 }
+EXPORT_SYMBOL_GPL(irq_work_queue);
 
-/* Enqueue the irq work @work on the current CPU */
-bool irq_work_queue(struct irq_work *work)
+/*
+ * Enqueue the irq_work @work on @cpu unless it's already pending
+ * somewhere.
+ *
+ * Can be re-enqueued while the callback is still in progress.
+ */
+bool irq_work_queue_on(struct irq_work *work, int cpu)
 {
+#ifndef CONFIG_SMP
+	return irq_work_queue(work);
+
+#else /* CONFIG_SMP: */
+	/* All work should have been flushed before going offline */
+	WARN_ON_ONCE(cpu_is_offline(cpu));
+
 	/* Only queue if not already pending */
 	if (!irq_work_claim(work))
 		return false;
 
-	/* Queue the entry and raise the IPI if needed. */
 	preempt_disable();
-
-	/* If the work is "lazy", handle it from next tick if any */
-	if (work->flags & IRQ_WORK_LAZY) {
-		if (llist_add(&work->llnode, this_cpu_ptr(&lazy_list)) &&
-		    tick_nohz_tick_stopped())
-			arch_irq_work_raise();
+	if (cpu != smp_processor_id()) {
+		/* Arch remote IPI send/receive backend aren't NMI safe */
+		WARN_ON_ONCE(in_nmi());
+		if (llist_add(&work->llnode, &per_cpu(raised_list, cpu)))
+			arch_send_call_function_single_ipi(cpu);
 	} else {
-		if (llist_add(&work->llnode, this_cpu_ptr(&raised_list)))
-			arch_irq_work_raise();
+		__irq_work_queue_local(work);
 	}
-
 	preempt_enable();
 
 	return true;
+#endif /* CONFIG_SMP */
 }
-EXPORT_SYMBOL_GPL(irq_work_queue);
+
 
 bool irq_work_needs_cpu(void)
 {
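
For context, here is a minimal, hypothetical usage sketch of the two entry points reorganized above. The names example_irq_work_fn, example_work and example_queue, and the choice of CPU 1, are illustrative assumptions and not part of this patch; only irq_work_queue(), irq_work_queue_on(), pr_info() and smp_processor_id() are the real kernel APIs.

#include <linux/irq_work.h>
#include <linux/printk.h>
#include <linux/smp.h>

/*
 * Runs in hard-IRQ context once the raised interrupt (or, for
 * IRQ_WORK_LAZY work, the next tick) is handled.
 */
static void example_irq_work_fn(struct irq_work *work)
{
	pr_info("irq_work ran on CPU %d\n", smp_processor_id());
}

static struct irq_work example_work = {
	.func	= example_irq_work_fn,
};

static void example_queue(void)
{
	/*
	 * Queue on the local CPU: goes through __irq_work_queue_local()
	 * and raises the local irq_work interrupt (or defers to the tick
	 * for lazy work).
	 */
	irq_work_queue(&example_work);

	/*
	 * Queue on a specific CPU (assumes CPU 1 is online): after this
	 * change an IPI is sent only when @cpu is not the current CPU.
	 * Both calls return false if the work is still pending.
	 */
	irq_work_queue_on(&example_work, 1);
}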