
Commit 471ba0e

npiggin authored and Ingo Molnar committed
irq_work: Do not raise an IPI when queueing work on the local CPU
The QEMU PowerPC/PSeries machine model was not expecting a self-IPI, and it may be a bit surprising thing to do, so have irq_work_queue_on do local queueing when target is the current CPU.

Suggested-by: Steven Rostedt <[email protected]>
Reported-by: Sebastian Andrzej Siewior <[email protected]>
Tested-by: Sebastian Andrzej Siewior <[email protected]>
Signed-off-by: Nicholas Piggin <[email protected]>
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Reviewed-by: Frederic Weisbecker <[email protected]>
Acked-by: Peter Zijlstra (Intel) <[email protected]>
Cc: Cédric Le Goater <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Paul Mackerras <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Suraj Jitindar Singh <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Link: https://lkml.kernel.org/r/[email protected]
[ Simplified the preprocessor comments. Fixed unbalanced curly brackets pointed out by Thomas. ]
Signed-off-by: Ingo Molnar <[email protected]>
1 parent 2d65c42 commit 471ba0e
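For context, below is a minimal, hypothetical caller sketch of the API this commit changes; it is not part of the commit. The names my_irq_work_fn, my_work, and example_queue_on() are illustrative, and the snippet assumes an ordinary SMP kernel module context where <linux/irq_work.h> is available.

#include <linux/irq_work.h>
#include <linux/printk.h>
#include <linux/smp.h>

/* Hypothetical handler: runs in hard-IRQ context on the CPU the work was queued on. */
static void my_irq_work_fn(struct irq_work *work)
{
        pr_info("irq_work ran on CPU %d\n", smp_processor_id());
}

static struct irq_work my_work;

/* Hypothetical caller: queue my_work so that it runs on @cpu. */
static void example_queue_on(int cpu)
{
        init_irq_work(&my_work, my_irq_work_fn);

        /*
         * Before this commit, irq_work_queue_on() always took the remote
         * IPI path under CONFIG_SMP; with it, a target equal to the
         * current CPU is queued locally instead, avoiding a self-IPI.
         */
        if (!irq_work_queue_on(&my_work, cpu))
                pr_info("work was already pending\n");
}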

File tree

1 file changed (+42, -33 lines)


kernel/irq_work.c

Lines changed: 42 additions & 33 deletions
@@ -56,61 +56,70 @@ void __weak arch_irq_work_raise(void)
          */
 }
 
-/*
- * Enqueue the irq_work @work on @cpu unless it's already pending
- * somewhere.
- *
- * Can be re-enqueued while the callback is still in progress.
- */
-bool irq_work_queue_on(struct irq_work *work, int cpu)
+/* Enqueue on current CPU, work must already be claimed and preempt disabled */
+static void __irq_work_queue_local(struct irq_work *work)
 {
-        /* All work should have been flushed before going offline */
-        WARN_ON_ONCE(cpu_is_offline(cpu));
-
-#ifdef CONFIG_SMP
-
-        /* Arch remote IPI send/receive backend aren't NMI safe */
-        WARN_ON_ONCE(in_nmi());
+        /* If the work is "lazy", handle it from next tick if any */
+        if (work->flags & IRQ_WORK_LAZY) {
+                if (llist_add(&work->llnode, this_cpu_ptr(&lazy_list)) &&
+                    tick_nohz_tick_stopped())
+                        arch_irq_work_raise();
+        } else {
+                if (llist_add(&work->llnode, this_cpu_ptr(&raised_list)))
+                        arch_irq_work_raise();
+        }
+}
 
+/* Enqueue the irq work @work on the current CPU */
+bool irq_work_queue(struct irq_work *work)
+{
         /* Only queue if not already pending */
         if (!irq_work_claim(work))
                 return false;
 
-        if (llist_add(&work->llnode, &per_cpu(raised_list, cpu)))
-                arch_send_call_function_single_ipi(cpu);
-
-#else /* #ifdef CONFIG_SMP */
-        irq_work_queue(work);
-#endif /* #else #ifdef CONFIG_SMP */
+        /* Queue the entry and raise the IPI if needed. */
+        preempt_disable();
+        __irq_work_queue_local(work);
+        preempt_enable();
 
         return true;
 }
+EXPORT_SYMBOL_GPL(irq_work_queue);
 
-/* Enqueue the irq work @work on the current CPU */
-bool irq_work_queue(struct irq_work *work)
+/*
+ * Enqueue the irq_work @work on @cpu unless it's already pending
+ * somewhere.
+ *
+ * Can be re-enqueued while the callback is still in progress.
+ */
+bool irq_work_queue_on(struct irq_work *work, int cpu)
 {
+#ifndef CONFIG_SMP
+        return irq_work_queue(work);
+
+#else /* CONFIG_SMP: */
+        /* All work should have been flushed before going offline */
+        WARN_ON_ONCE(cpu_is_offline(cpu));
+
         /* Only queue if not already pending */
         if (!irq_work_claim(work))
                 return false;
 
-        /* Queue the entry and raise the IPI if needed. */
         preempt_disable();
-
-        /* If the work is "lazy", handle it from next tick if any */
-        if (work->flags & IRQ_WORK_LAZY) {
-                if (llist_add(&work->llnode, this_cpu_ptr(&lazy_list)) &&
-                    tick_nohz_tick_stopped())
-                        arch_irq_work_raise();
+        if (cpu != smp_processor_id()) {
+                /* Arch remote IPI send/receive backend aren't NMI safe */
+                WARN_ON_ONCE(in_nmi());
+                if (llist_add(&work->llnode, &per_cpu(raised_list, cpu)))
+                        arch_send_call_function_single_ipi(cpu);
         } else {
-                if (llist_add(&work->llnode, this_cpu_ptr(&raised_list)))
-                        arch_irq_work_raise();
+                __irq_work_queue_local(work);
         }
-
         preempt_enable();
 
         return true;
+#endif /* CONFIG_SMP */
 }
-EXPORT_SYMBOL_GPL(irq_work_queue);
+
 
 bool irq_work_needs_cpu(void)
 {
