Commit 2d46aea

genirq/chip: Rework handle_edge_irq()
Use the new helpers to decide whether the interrupt should be handled and
switch the descriptor locking to guard(). Fixup the kernel doc comment
while at it. No functional change.

Signed-off-by: Thomas Gleixner <[email protected]>
Acked-by: Peter Zijlstra (Intel) <[email protected]>
Link: https://lore.kernel.org/all/[email protected]
1 parent 15d772e commit 2d46aea
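Context for the locking change: guard() is the scope-based lock guard macro from the kernel's cleanup infrastructure (linux/cleanup.h). The lock taken by guard(raw_spinlock)(&desc->lock) is released automatically when the guarded scope is left, on every return path, which is why the out_unlock label and the explicit raw_spin_unlock() disappear in the diff below. As a rough illustration of the underlying mechanism only, here is a minimal userspace sketch built on __attribute__((cleanup)), with a pthread mutex standing in for the raw spinlock; the macro and helper names are invented for this example and are not kernel API.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t demo_lock = PTHREAD_MUTEX_INITIALIZER;

/* Cleanup callback: receives a pointer to the guarded variable. */
static void demo_unlock(pthread_mutex_t **lockp)
{
	pthread_mutex_unlock(*lockp);
}

/*
 * Rough userspace analogue of guard(raw_spinlock)(&desc->lock):
 * the unlock runs automatically when the enclosing scope is left,
 * so no out_unlock label is needed. Illustrative only; this is not
 * the kernel's guard() implementation.
 */
#define demo_scoped_lock(lock) \
	pthread_mutex_t *__guard __attribute__((cleanup(demo_unlock))) = (lock); \
	pthread_mutex_lock(__guard)

static int pending;

static void demo_edge_handler(int can_handle)
{
	demo_scoped_lock(&demo_lock);

	if (!can_handle) {
		pending = 1;
		return;		/* demo_lock released here */
	}

	pending = 0;
	printf("handled event, pending = %d\n", pending);
}				/* ...and released here too */

int main(void)
{
	demo_edge_handler(0);	/* takes the early-return path */
	demo_edge_handler(1);	/* takes the normal path */
	return 0;
}

Building with gcc -Wall -pthread shows both return paths releasing the lock without any explicit unlock call, which is the property the commit relies on when it drops the goto-based unwind.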

kernel/irq/chip.c

Lines changed: 16 additions & 33 deletions
@@ -742,40 +742,27 @@ void handle_fasteoi_nmi(struct irq_desc *desc)
 EXPORT_SYMBOL_GPL(handle_fasteoi_nmi);
 
 /**
- *	handle_edge_irq - edge type IRQ handler
- *	@desc:	the interrupt description structure for this irq
+ * handle_edge_irq - edge type IRQ handler
+ * @desc: the interrupt description structure for this irq
  *
- *	Interrupt occurs on the falling and/or rising edge of a hardware
- *	signal. The occurrence is latched into the irq controller hardware
- *	and must be acked in order to be reenabled. After the ack another
- *	interrupt can happen on the same source even before the first one
- *	is handled by the associated event handler. If this happens it
- *	might be necessary to disable (mask) the interrupt depending on the
- *	controller hardware. This requires to reenable the interrupt inside
- *	of the loop which handles the interrupts which have arrived while
- *	the handler was running. If all pending interrupts are handled, the
- *	loop is left.
+ * Interrupt occurs on the falling and/or rising edge of a hardware
+ * signal. The occurrence is latched into the irq controller hardware and
+ * must be acked in order to be reenabled. After the ack another interrupt
+ * can happen on the same source even before the first one is handled by
+ * the associated event handler. If this happens it might be necessary to
+ * disable (mask) the interrupt depending on the controller hardware. This
+ * requires to reenable the interrupt inside of the loop which handles the
+ * interrupts which have arrived while the handler was running. If all
+ * pending interrupts are handled, the loop is left.
  */
 void handle_edge_irq(struct irq_desc *desc)
 {
-	raw_spin_lock(&desc->lock);
-
-	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
-
-	if (!irq_can_handle_pm(desc)) {
-		desc->istate |= IRQS_PENDING;
-		mask_ack_irq(desc);
-		goto out_unlock;
-	}
+	guard(raw_spinlock)(&desc->lock);
 
-	/*
-	 * If its disabled or no action available then mask it and get
-	 * out of here.
-	 */
-	if (irqd_irq_disabled(&desc->irq_data) || !desc->action) {
+	if (!irq_can_handle(desc)) {
 		desc->istate |= IRQS_PENDING;
 		mask_ack_irq(desc);
-		goto out_unlock;
+		return;
 	}
 
 	kstat_incr_irqs_this_cpu(desc);
@@ -786,7 +773,7 @@ void handle_edge_irq(struct irq_desc *desc)
 	do {
 		if (unlikely(!desc->action)) {
 			mask_irq(desc);
-			goto out_unlock;
+			return;
 		}
 
 		/*
@@ -802,11 +789,7 @@ void handle_edge_irq(struct irq_desc *desc)
 
 		handle_irq_event(desc);
 
-	} while ((desc->istate & IRQS_PENDING) &&
-		 !irqd_irq_disabled(&desc->irq_data));
-
-out_unlock:
-	raw_spin_unlock(&desc->lock);
+	} while ((desc->istate & IRQS_PENDING) && !irqd_irq_disabled(&desc->irq_data));
 }
 EXPORT_SYMBOL(handle_edge_irq);
 
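For reference, the single irq_can_handle() test replaces both conditionals removed above: the irq_can_handle_pm() check and the disabled/no-action check. The following is only a hedged sketch of what the new helper plausibly folds together, inferred from this diff alone rather than taken from the kernel implementation; the name is deliberately suffixed _sketch to mark it as hypothetical.

/*
 * Sketch only: what irq_can_handle() plausibly combines, based on the
 * two conditionals this diff removes from handle_edge_irq(). Not the
 * actual kernel helper.
 */
static inline bool irq_can_handle_sketch(struct irq_desc *desc)
{
	/* Power-management / suspend state, formerly the first check */
	if (!irq_can_handle_pm(desc))
		return false;

	/* Disabled or no handler installed, formerly the second check */
	if (irqd_irq_disabled(&desc->irq_data) || !desc->action)
		return false;

	return true;
}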